Description of problem: After an host reboot vdsm reports a VM that doesn't exist at libvirt level [root@ngn42h1 ~]# virsh -r list --all Id Name State ---------------------------------------------------- [root@ngn42h1 ~]# ps aux | grep qemu root 1068 0.0 0.0 24988 1792 ? Ss 16:30 0:00 /usr/bin/qemu-ga --method=virtio-serial --path=/dev/virtio-ports/org.qemu.guest_agent.0 --blacklist=guest-file-open,guest-file-close,guest-file-read,guest-file-write,guest-file-seek,guest-file-flush,guest-exec,guest-exec-status -F/etc/qemu-ga/fsfreeze-hook root 3892 0.0 0.0 112664 968 pts/0 S+ 16:45 0:00 grep --color=auto qemu [root@ngn42h1 ~]# vdsm-client Host getVMFullList [ { "xml": "<domain type='kvm'>\n <name>HostedEngine</name>\n <uuid>6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0</uuid>\n <metadata xmlns:ovirt-tune=\"http://ovirt.org/vm/tune/1.0\" xmlns:ovirt-vm=\"http://ovirt.org/vm/1.0\">\n <ovirt-tune:qos/>\n <ovirt-vm:vm xmlns:ovirt-vm=\"http://ovirt.org/vm/1.0\">\n <ovirt-vm:destroy_on_reboot type=\"bool\">False</ovirt-vm:destroy_on_reboot>\n <ovirt-vm:exitCode type=\"int\">1</ovirt-vm:exitCode>\n <ovirt-vm:exitMessage>VM has been terminated on the host</ovirt-vm:exitMessage>\n <ovirt-vm:exitReason type=\"int\">11</ovirt-vm:exitReason>\n <ovirt-vm:memGuaranteedSize type=\"int\">0</ovirt-vm:memGuaranteedSize>\n <ovirt-vm:pauseTime type=\"float\">4295966.41</ovirt-vm:pauseTime>\n <ovirt-vm:startTime type=\"float\">1517584414.84</ovirt-vm:startTime>\n <ovirt-vm:device mac_address=\"00:16:3E:6A:7A:F9\">\n <ovirt-vm:network>ovirtmgmt</ovirt-vm:network>\n <ovirt-vm:specParams/>\n <ovirt-vm:vm_custom/>\n </ovirt-vm:device>\n <ovirt-vm:device devtype=\"disk\" name=\"hdc\">\n <ovirt-vm:shared>false</ovirt-vm:shared>\n <ovirt-vm:specParams/>\n <ovirt-vm:vm_custom/>\n </ovirt-vm:device>\n <ovirt-vm:device devtype=\"disk\" name=\"vda\">\n <ovirt-vm:domainID>0cb05e46-18ec-492c-ba4f-d45785ccdc0d</ovirt-vm:domainID>\n <ovirt-vm:guestName>/dev/vda</ovirt-vm:guestName>\n 
<ovirt-vm:imageID>9d3806f6-09bd-454f-82a2-04ff4d311d62</ovirt-vm:imageID>\n <ovirt-vm:poolID>00000000-0000-0000-0000-000000000000</ovirt-vm:poolID>\n <ovirt-vm:shared>exclusive</ovirt-vm:shared>\n <ovirt-vm:volumeID>6318f3b9-d434-444f-a3d7-de2172b31a0c</ovirt-vm:volumeID>\n <ovirt-vm:specParams/>\n <ovirt-vm:vm_custom/>\n <ovirt-vm:volumeChain>\n <ovirt-vm:volumeChainNode>\n <ovirt-vm:domainID>0cb05e46-18ec-492c-ba4f-d45785ccdc0d</ovirt-vm:domainID>\n <ovirt-vm:imageID>9d3806f6-09bd-454f-82a2-04ff4d311d62</ovirt-vm:imageID>\n <ovirt-vm:leaseOffset type=\"int\">112197632</ovirt-vm:leaseOffset>\n <ovirt-vm:leasePath>/dev/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/leases</ovirt-vm:leasePath>\n <ovirt-vm:path>/rhev/data-center/mnt/blockSD/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/images/9d3806f6-09bd-454f-82a2-04ff4d311d62/6318f3b9-d434-444f-a3d7-de2172b31a0c</ovirt-vm:path>\n <ovirt-vm:volumeID>6318f3b9-d434-444f-a3d7-de2172b31a0c</ovirt-vm:volumeID>\n </ovirt-vm:volumeChainNode>\n </ovirt-vm:volumeChain>\n </ovirt-vm:device>\n </ovirt-vm:vm>\n </metadata>\n <memory unit='KiB'>4194304</memory>\n <currentMemory unit='KiB'>4194304</currentMemory>\n <vcpu placement='static'>4</vcpu>\n <sysinfo type='smbios'>\n <system>\n <entry name='manufacturer'>oVirt</entry>\n <entry name='product'>oVirt Node</entry>\n <entry name='version'>7-4.1708.el7.centos</entry>\n <entry name='serial'>9638D503-974F-4772-8BA0-05B2DA3764C9</entry>\n <entry name='uuid'>6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0</entry>\n </system>\n </sysinfo>\n <os>\n <type arch='x86_64' machine='pc-i440fx-rhel7.4.0'>hvm</type>\n <smbios mode='sysinfo'/>\n </os>\n <features>\n <acpi/>\n </features>\n <cpu mode='custom' match='exact' check='partial'>\n <model fallback='allow'>Haswell-noTSX</model>\n </cpu>\n <clock offset='variable' adjustment='0' basis='utc'>\n <timer name='rtc' tickpolicy='catchup'/>\n <timer name='pit' tickpolicy='delay'/>\n <timer name='hpet' present='no'/>\n </clock>\n <on_poweroff>destroy</on_poweroff>\n 
<on_reboot>destroy</on_reboot>\n <on_crash>destroy</on_crash>\n <devices>\n <emulator>/usr/libexec/qemu-kvm</emulator>\n <disk type='file' device='cdrom'>\n <driver name='qemu' type='raw' error_policy='stop' io='threads'/>\n <source file='/tmp/tmp4mEi6e/seed.iso' startupPolicy='optional'/>\n <target dev='hdc' bus='ide'/>\n <readonly/>\n <address type='drive' controller='0' bus='1' target='0' unit='0'/>\n </disk>\n <disk type='block' device='disk' snapshot='no'>\n <driver name='qemu' type='raw' cache='none' error_policy='stop' io='native'/>\n <source dev='/var/run/vdsm/storage/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/9d3806f6-09bd-454f-82a2-04ff4d311d62/6318f3b9-d434-444f-a3d7-de2172b31a0c'/>\n <target dev='vda' bus='virtio'/>\n <serial>9d3806f6-09bd-454f-82a2-04ff4d311d62</serial>\n <boot order='1'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>\n </disk>\n <controller type='scsi' index='0' model='virtio-scsi'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>\n </controller>\n <controller type='usb' index='0' model='piix3-uhci'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>\n </controller>\n <controller type='pci' index='0' model='pci-root'/>\n <controller type='ide' index='0'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>\n </controller>\n <controller type='virtio-serial' index='0'>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>\n </controller>\n <lease>\n <lockspace>0cb05e46-18ec-492c-ba4f-d45785ccdc0d</lockspace>\n <key>6318f3b9-d434-444f-a3d7-de2172b31a0c</key>\n <target path='/dev/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/leases' offset='112197632'/>\n </lease>\n <interface type='bridge'>\n <mac address='00:16:3e:6a:7a:f9'/>\n <source bridge='ovirtmgmt'/>\n <model type='virtio'/>\n <link state='up'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>\n </interface>\n <console 
type='unix'>\n <source mode='bind' path='/var/run/ovirt-vmconsole-console/6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0.sock'/>\n <target type='virtio' port='0'/>\n </console>\n <channel type='unix'>\n <source mode='bind' path='/var/lib/libvirt/qemu/channels/6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0.com.redhat.rhevm.vdsm'/>\n <target type='virtio' name='com.redhat.rhevm.vdsm'/>\n <address type='virtio-serial' controller='0' bus='0' port='1'/>\n </channel>\n <channel type='unix'>\n <source mode='bind' path='/var/lib/libvirt/qemu/channels/6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0.org.qemu.guest_agent.0'/>\n <target type='virtio' name='org.qemu.guest_agent.0'/>\n <address type='virtio-serial' controller='0' bus='0' port='2'/>\n </channel>\n <channel type='unix'>\n <source mode='bind' path='/var/lib/libvirt/qemu/channels/6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0.org.ovirt.hosted-engine-setup.0'/>\n <target type='virtio' name='org.ovirt.hosted-engine-setup.0'/>\n <address type='virtio-serial' controller='0' bus='0' port='3'/>\n </channel>\n <input type='mouse' bus='ps2'/>\n <input type='keyboard' bus='ps2'/>\n <graphics type='vnc' port='-1' autoport='yes' listen='0' passwdValidTo='1970-01-01T00:00:01'>\n <listen type='address' address='0'/>\n </graphics>\n <video>\n <model type='vga' vram='32768' heads='1' primary='yes'/>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>\n </video>\n <memballoon model='none'/>\n <rng model='virtio'>\n <backend model='random'>/dev/urandom</backend>\n <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>\n </rng>\n </devices>\n</domain>\n", "status": "Down", "statusTime": "4295587480", "arch": "x86_64", "vmId": "6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0", "devices": [ { "index": 2, "iface": "ide", "name": "hdc", "vm_custom": {}, "format": "raw", "shared": "false", "vmid": "6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0", "diskType": "file", "specParams": {}, "readonly": true, "address": { "bus": "1", "controller": "0", "type": 
"drive", "target": "0", "unit": "0" }, "device": "cdrom", "discard": false, "path": "/tmp/tmp4mEi6e/seed.iso", "propagateErrors": "off", "type": "disk" }, { "address": { "slot": "0x06", "bus": "0x00", "domain": "0x0000", "type": "pci", "function": "0x0" }, "serial": "9d3806f6-09bd-454f-82a2-04ff4d311d62", "index": 0, "iface": "virtio", "guestName": "/dev/vda", "cache": "none", "imageID": "9d3806f6-09bd-454f-82a2-04ff4d311d62", "shared": "exclusive", "type": "disk", "domainID": "0cb05e46-18ec-492c-ba4f-d45785ccdc0d", "reqsize": "0", "format": "raw", "poolID": "00000000-0000-0000-0000-000000000000", "device": "disk", "path": "/var/run/vdsm/storage/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/9d3806f6-09bd-454f-82a2-04ff4d311d62/6318f3b9-d434-444f-a3d7-de2172b31a0c", "propagateErrors": "off", "name": "vda", "vm_custom": {}, "bootOrder": "1", "vmid": "6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0", "volumeID": "6318f3b9-d434-444f-a3d7-de2172b31a0c", "diskType": "block", "specParams": {}, "discard": false, "volumeChain": [ { "domainID": "0cb05e46-18ec-492c-ba4f-d45785ccdc0d", "leaseOffset": 112197632, "volumeID": "6318f3b9-d434-444f-a3d7-de2172b31a0c", "leasePath": "/dev/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/leases", "imageID": "9d3806f6-09bd-454f-82a2-04ff4d311d62", "path": "/rhev/data-center/mnt/blockSD/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/images/9d3806f6-09bd-454f-82a2-04ff4d311d62/6318f3b9-d434-444f-a3d7-de2172b31a0c" } ] }, { "device": "console", "specParams": { "consoleType": "virtio", "enableSocket": true }, "vmid": "6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0", "type": "console" }, { "device": "memballoon", "specParams": { "model": "none" }, "type": "memballoon" }, { "device": "vnc", "specParams": { "fileTransferEnable": true, "copyPasteEnable": true, "displayIp": "0" }, "vmid": "6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0", "port": "-1", "type": "vnc" }, { "vmid": "6518b1f1-b5cb-4aed-bd16-e3c4b4f980c0", "specParams": { "source": "urandom" }, "address": { "slot": "0x07", "bus": "0x00", 
"domain": "0x0000", "type": "pci", "function": "0x0" }, "device": "rng", "model": "virtio", "type": "rng" }, { "device": "pci", "index": "0", "model": "pci-root", "type": "pci" }, { "device": "vga", "specParams": { "vram": "32768", "heads": "1" }, "type": "video", "address": { "slot": "0x02", "bus": "0x00", "domain": "0x0000", "type": "pci", "function": "0x0" } }, { "lease_id": "6318f3b9-d434-444f-a3d7-de2172b31a0c", "sd_id": "0cb05e46-18ec-492c-ba4f-d45785ccdc0d", "offset": "112197632", "device": "lease", "path": "/dev/0cb05e46-18ec-492c-ba4f-d45785ccdc0d/leases", "type": "lease" } ], "guestDiskMapping": {}, "vmType": "kvm", "external": false, "memSize": 4096, "clientIp": "", "smp": "4", "vmName": "HostedEngine" } ] Version-Release number of selected component (if applicable): vdsm.x86_64 4.20.17-1.el7.centos How reproducible: ? Steps to Reproduce: 1. ? 2. 3. Actual results: The VM is not there at libvirt level but vdsm still reports it Expected results: vdsm and libvirt are coherent Additional info:
Created attachment 1390241 [details] vdsm logs
Also, an upstream user reported something similar; see http://lists.ovirt.org/pipermail/users/2018-February/086664.html
Until the exit code is read by the engine, all VMs are still going to be reported. Anyway, I do not see a reboot in your logs, just restarts of vdsm (service shutdown immediately followed by start up) > [root@ngn42h1 ~]# ps aux | grep qemu > root 1068 0.0 0.0 24988 1792 ? Ss 16:30 0:00 /usr/bin/qemu-ga Are you running a nested hosted engine host?
(In reply to Michal Skrivanek from comment #3) > until the exit code is read by engine all VMs are still going to be reported. > Anyway, I do not see a reboot in your logs, just restarts of vdsm (service > shutdown immediately followed by start up) Probably here: 2018-02-02 16:30:09,204+0100 INFO (MainThread) [vds] Exiting (vdsmd:170) 2018-02-02 16:30:43,057+0100 INFO (MainThread) [vds] (PID: 2678) I am the actual vdsm 4.20.17-1.el7.centos ngn42h1.localdomain (3.10.0-693.17.1.el7.x86_64) (vdsmd:148) I don't remember if I properly stopped the engine VM after my attempt or if I simply rebooted the host and libvirt-guests shut down the engine VM for me on reboot. > > [root@ngn42h1 ~]# ps aux | grep qemu > > root 1068 0.0 0.0 24988 1792 ? Ss 16:30 0:00 /usr/bin/qemu-ga > you're running nested hosted engine host? Yes, exactly. VM snapshots help me a lot to repeat complex tests on a clean environment.
Well, there's nothing in the logs, and it is expected behavior (comment #3), so I think there's not much to do here. Please reopen if you still think there's anything to change.