The problem today is that a global granule is used in the virtio-iommu device; in that case some virtio devices are attached to some domains prior to the VFIO device. The default 4kB page size starts to be used and gets frozen. Then we try to attach a VFIO device which doesn't support this granule (because the host uses 64kB pages), and this fails according to the spirit of the current implementation. My gut feeling is that the granule should be per domain instead. I initiated a discussion with Jean-Philippe Brucker.
Sent [PATCH 0/2] VIRTIO-IOMMU/VFIO page size related fixes (https://lore.kernel.org/all/20230704111527.3424992-1-eric.auger@redhat.com/)
Tested with host kernel 5.14.0-339.el9.aarch64+64k and qemu-kvm-8.0.0-8.el9; this problem is fixed. MALLOC_PERTURB_=1 /usr/libexec/qemu-kvm \ -name 'avocado-vt-vm1' \ -sandbox on \ -blockdev '{"node-name": "file_aavmf_code", "driver": "file", "filename": "/usr/share/edk2/aarch64/QEMU_EFI-silent-pflash.qcow2", "auto-read-only": true, "discard": "unmap"}' \ -blockdev '{"node-name": "drive_aavmf_code", "driver": "qcow2", "read-only": true, "file": "file_aavmf_code"}' \ -blockdev '{"node-name": "file_aavmf_vars", "driver": "file", "filename": "/root/avocado/data/avocado-vt/avocado-vt-vm1_rhel930-aarch64-64k-virtio-scsi_qcow2_filesystem_VARS.qcow2", "auto-read-only": true, "discard": "unmap"}' \ -blockdev '{"node-name": "drive_aavmf_vars", "driver": "qcow2", "read-only": false, "file": "file_aavmf_vars"}' \ -machine virt,gic-version=host,pflash0=drive_aavmf_code,pflash1=drive_aavmf_vars,memory-backend=mem-machine_mem \ -device '{"id": "pcie-root-port-0", "driver": "pcie-root-port", "multifunction": true, "bus": "pcie.0", "addr": "0x1", "chassis": 1}' \ -device '{"id": "pcie-pci-bridge-0", "driver": "pcie-pci-bridge", "addr": "0x0", "bus": "pcie-root-port-0"}' \ -device '{"id": "viommu", "driver": "virtio-iommu-pci", "bus": "pcie.0", "addr": "0x2"}' \ -nodefaults \ -device '{"id": "pcie-root-port-1", "port": 1, "driver": "pcie-root-port", "addr": "0x1.0x1", "bus": "pcie.0", "chassis": 2}' \ -device '{"driver": "virtio-gpu-pci", "bus": "pcie-root-port-1", "addr": "0x0"}' \ -m 11264 \ -object '{"size": 11811160064, "id": "mem-machine_mem", "qom-type": "memory-backend-ram"}' \ -smp 112,maxcpus=112,cores=56,threads=1,clusters=1,sockets=2 \ -cpu 'host' \ -serial unix:'/var/tmp/serial-serial0',server=on,wait=off \ -device '{"id": "pcie-root-port-2", "port": 2, "driver": "pcie-root-port", "addr": "0x1.0x2", "bus": "pcie.0", "chassis": 3}' \ -device '{"driver": "qemu-xhci", "id": "usb1", "bus": "pcie-root-port-2", "addr": "0x0"}' \ -device '{"driver": "usb-tablet", "id": 
"usb-tablet1", "bus": "usb1.0", "port": "1"}' \ -device '{"id": "pcie-root-port-3", "port": 3, "driver": "pcie-root-port", "addr": "0x1.0x3", "bus": "pcie.0", "chassis": 4}' \ -device '{"id": "virtio_scsi_pci0", "driver": "virtio-scsi-pci", "bus": "pcie-root-port-3", "addr": "0x0"}' \ -blockdev '{"node-name": "file_image1", "driver": "file", "auto-read-only": true, "discard": "unmap", "aio": "threads", "filename": "/home/kvm_autotest_root/images/rhel930-aarch64-64k-virtio-scsi.qcow2", "cache": {"direct": true, "no-flush": false}}' \ -blockdev '{"node-name": "drive_image1", "driver": "qcow2", "read-only": false, "cache": {"direct": true, "no-flush": false}, "file": "file_image1"}' \ -device '{"driver": "scsi-hd", "id": "image1", "drive": "drive_image1", "write-cache": "on"}' \ -device '{"id": "pcie-root-port-4", "port": 4, "driver": "pcie-root-port", "addr": "0x1.0x4", "bus": "pcie.0", "chassis": 5}' \ -device '{"driver": "vfio-pci", "host": "0000:0b:00.0", "bus": "pcie-root-port-4", "addr": "0x0"}' \ -vnc :0 \ -rtc base=utc,clock=host \ -enable-kvm \ -monitor stdio Guest: Red Hat Enterprise Linux 9.3 Beta (Plow) Kernel 5.14.0-337.el9.aarch64+64k on an aarch64 Activate the web console with: systemctl enable --now cockpit.socket dhcp158-107 login: [root@dhcp158-107 ~]# ip route ip route default via 10.19.159.254 dev eth0 proto dhcp src 10.19.158.107 metric 100 10.19.152.0/21 dev eth0 proto kernel scope link src 10.19.158.107 metric 100 [root@dhcp158-107 ~]# ping -c 10 10.19.159.254 ping -c 10 10.19.159.254 PING 10.19.159.254 (10.19.159.254) 56(84) bytes of data. 
64 bytes from 10.19.159.254: icmp_seq=1 ttl=64 time=1.46 ms 64 bytes from 10.19.159.254: icmp_seq=2 ttl=64 time=1.32 ms 64 bytes from 10.19.159.254: icmp_seq=3 ttl=64 time=1.38 ms 64 bytes from 10.19.159.254: icmp_seq=4 ttl=64 time=1.16 ms 64 bytes from 10.19.159.254: icmp_seq=5 ttl=64 time=1.22 ms 64 bytes from 10.19.159.254: icmp_seq=6 ttl=64 time=1.01 ms 64 bytes from 10.19.159.254: icmp_seq=7 ttl=64 time=1.27 ms 64 bytes from 10.19.159.254: icmp_seq=8 ttl=64 time=1.27 ms 64 bytes from 10.19.159.254: icmp_seq=9 ttl=64 time=1.20 ms 64 bytes from 10.19.159.254: icmp_seq=10 ttl=64 time=1.20 ms --- 10.19.159.254 ping statistics --- 10 packets transmitted, 10 received, 0% packet loss, time 9012ms rtt min/avg/max/mdev = 1.006/1.249/1.459/0.118 ms
Moving to VERIFIED based on comment 9.