Bug 1718255 - libvirt reports wrong error when failing to attach LUKSv2 disk
Summary: libvirt reports wrong error when failing to attach LUKSv2 disk
Keywords:
Status: CLOSED ERRATA
Alias: None
Product: Red Hat Enterprise Linux Advanced Virtualization
Classification: Red Hat
Component: libvirt
Version: 8.0
Hardware: x86_64
OS: Linux
Priority: urgent
Severity: medium
Target Milestone: rc
Target Release: 8.1
Assignee: Peter Krempa
QA Contact: yisun
URL:
Whiteboard:
Depends On: 1719169
Blocks:
TreeView+ depends on / blocked
 
Reported: 2019-06-07 11:40 UTC by Matthew Booth
Modified: 2020-11-06 04:42 UTC (History)
25 users (show)

Fixed In Version: libvirt-5.10.0-1.el8
Doc Type: If docs needed, set a value
Doc Text:
Clone Of: 1714889
Environment:
Last Closed: 2020-05-05 09:46:14 UTC
Type: Bug
Target Upstream Version:
Embargoed:


Attachments (Terms of Use)

Comment 1 Matthew Booth 2019-06-07 11:43:41 UTC
Libvirt version is libvirt-4.5.0-23.module+el8+2800+2d311f65.x86_64

2019-06-07 11:15:29.245+0000: 583408: debug : qemuMonitorAddDrive:3199 : drive=file=/dev/disk/by-id/scsi-36001405564a251ee00c42f7841f42d56,key-secret=virtio-disk1-luks-secret0,format=luks,if=none,id=drive-virtio-disk1,cache=none,aio=native
2019-06-07 11:15:29.245+0000: 583408: debug : qemuMonitorAddDrive:3201 : mon:0x7f085801bcf0 vm:0x7f086014cb30 json:1 fd:22
2019-06-07 11:15:29.245+0000: 583408: debug : qemuMonitorHMPCommandWithFd:1286 : mon:0x7f085801bcf0 vm:0x7f086014cb30 json:1 fd:22
2019-06-07 11:15:29.245+0000: 583408: debug : qemuMonitorJSONCommandWithFd:304 : Send command '{"execute":"human-monitor-command","arguments":{"command-line":"drive_add dummy file=/dev/disk/by-id/scsi-36001405564a251ee00c42f7841f42d56,key-secret=virtio-disk1-luks-secret0,format=luks,if=none,id=drive-virtio-disk1,cache=none,aio=native"},"id":"libvirt-74"}' for write with FD -1
2019-06-07 11:15:29.245+0000: 583408: info : qemuMonitorSend:1081 : QEMU_MONITOR_SEND_MSG: mon=0x7f085801bcf0 msg={"execute":"human-monitor-command","arguments":{"command-line":"drive_add dummy file=/dev/disk/by-id/scsi-36001405564a251ee00c42f7841f42d56,key-secret=virtio-disk1-luks-secret0,format=luks,if=none,id=drive-virtio-disk1,cache=none,aio=native"},"id":"libvirt-74"}
 fd=-1
2019-06-07 11:15:29.247+0000: 583393: info : qemuMonitorIOWrite:549 : QEMU_MONITOR_IO_WRITE: mon=0x7f085801bcf0 buf={"execute":"human-monitor-command","arguments":{"command-line":"drive_add dummy file=/dev/disk/by-id/scsi-36001405564a251ee00c42f7841f42d56,key-secret=virtio-disk1-luks-secret0,format=luks,if=none,id=drive-virtio-disk1,cache=none,aio=native"},"id":"libvirt-74"}
 len=263 ret=263 errno=0
2019-06-07 11:15:29.262+0000: 583393: debug : qemuMonitorJSONIOProcessLine:196 : Line [{"return": "LUKS version 2 is not supported\r\n", "id": "libvirt-74"}]
2019-06-07 11:15:29.262+0000: 583393: info : qemuMonitorJSONIOProcessLine:216 : QEMU_MONITOR_RECV_REPLY: mon=0x7f085801bcf0 reply={"return": "LUKS version 2 is not supported\r\n", "id": "libvirt-74"}
2019-06-07 11:15:29.262+0000: 583408: debug : qemuMonitorJSONCommandWithFd:309 : Receive command reply ret=0 rxObject=0x55942a5d4d70
2019-06-07 11:15:29.262+0000: 583408: debug : qemuMonitorAddDeviceWithFd:3025 : device=virtio-blk-pci,scsi=off,bus=pci.5,addr=0x0,drive=drive-virtio-disk1,id=virtio-disk1,write-cache=on,serial=be31337b-d190-45ba-bde6-2e35d99a4781 fd=-1 fdname=<null>
2019-06-07 11:15:29.262+0000: 583408: debug : qemuMonitorAddDeviceWithFd:3028 : mon:0x7f085801bcf0 vm:0x7f086014cb30 json:1 fd:22
2019-06-07 11:15:29.262+0000: 583408: debug : qemuMonitorJSONCommandWithFd:304 : Send command '{"execute":"device_add","arguments":{"driver":"virtio-blk-pci","scsi":"off","bus":"pci.5","addr":"0x0","drive":"drive-virtio-disk1","id":"virtio-disk1","write-cache":"on","serial":"be31337b-d190-45ba-bde6-2e35d99a4781"},"id":"libvirt-75"}' for write with FD -1
2019-06-07 11:15:29.262+0000: 583408: info : qemuMonitorSend:1081 : QEMU_MONITOR_SEND_MSG: mon=0x7f085801bcf0 msg={"execute":"device_add","arguments":{"driver":"virtio-blk-pci","scsi":"off","bus":"pci.5","addr":"0x0","drive":"drive-virtio-disk1","id":"virtio-disk1","write-cache":"on","serial":"be31337b-d190-45ba-bde6-2e35d99a4781"},"id":"libvirt-75"}
 fd=-1
2019-06-07 11:15:29.262+0000: 583393: info : qemuMonitorIOWrite:549 : QEMU_MONITOR_IO_WRITE: mon=0x7f085801bcf0 buf={"execute":"device_add","arguments":{"driver":"virtio-blk-pci","scsi":"off","bus":"pci.5","addr":"0x0","drive":"drive-virtio-disk1","id":"virtio-disk1","write-cache":"on","serial":"be31337b-d190-45ba-bde6-2e35d99a4781"},"id":"libvirt-75"}
 len=240 ret=240 errno=0
2019-06-07 11:15:29.264+0000: 583393: debug : qemuMonitorJSONIOProcessLine:196 : Line [{"id": "libvirt-75", "error": {"class": "GenericError", "desc": "Property 'virtio-blk-device.drive' can't find value 'drive-virtio-disk1'"}}]
2019-06-07 11:15:29.264+0000: 583393: info : qemuMonitorJSONIOProcessLine:216 : QEMU_MONITOR_RECV_REPLY: mon=0x7f085801bcf0 reply={"id": "libvirt-75", "error": {"class": "GenericError", "desc": "Property 'virtio-blk-device.drive' can't find value 'drive-virtio-disk1'"}}
2019-06-07 11:15:29.265+0000: 583408: debug : qemuMonitorJSONCommandWithFd:309 : Receive command reply ret=0 rxObject=0x55942a5d4d70
2019-06-07 11:15:29.265+0000: 583408: debug : qemuMonitorJSONCheckError:384 : unable to execute QEMU command {"execute":"device_add","arguments":{"driver":"virtio-blk-pci","scsi":"off","bus":"pci.5","addr":"0x0","drive":"drive-virtio-disk1","id":"virtio-disk1","write-cache":"on","serial":"be31337b-d190-45ba-bde6-2e35d99a4781"},"id":"libvirt-75"}: {"id":"libvirt-75","error":{"class":"GenericError","desc":"Property 'virtio-blk-device.drive' can't find value 'drive-virtio-disk1'"}}
2019-06-07 11:15:29.265+0000: 583408: error : qemuMonitorJSONCheckError:395 : internal error: unable to execute QEMU command 'device_add': Property 'virtio-blk-device.drive' can't find value 'drive-virtio-disk1'
2019-06-07 11:15:29.265+0000: 583408: debug : qemuMonitorDriveDel:2998 : drivestr=drive-virtio-disk1
2019-06-07 11:15:29.265+0000: 583408: debug : qemuMonitorDriveDel:3000 : mon:0x7f085801bcf0 vm:0x7f086014cb30 json:1 fd:22
2019-06-07 11:15:29.265+0000: 583408: debug : qemuMonitorHMPCommandWithFd:1286 : mon:0x7f085801bcf0 vm:0x7f086014cb30 json:1 fd:22
2019-06-07 11:15:29.265+0000: 583408: debug : qemuMonitorJSONCommandWithFd:304 : Send command '{"execute":"human-monitor-command","arguments":{"command-line":"drive_del drive-virtio-disk1"},"id":"libvirt-76"}' for write with FD -1
2019-06-07 11:15:29.265+0000: 583408: info : qemuMonitorSend:1081 : QEMU_MONITOR_SEND_MSG: mon=0x7f085801bcf0 msg={"execute":"human-monitor-command","arguments":{"command-line":"drive_del drive-virtio-disk1"},"id":"libvirt-76"}
 fd=-1
2019-06-07 11:15:29.265+0000: 583393: info : qemuMonitorIOWrite:549 : QEMU_MONITOR_IO_WRITE: mon=0x7f085801bcf0 buf={"execute":"human-monitor-command","arguments":{"command-line":"drive_del drive-virtio-disk1"},"id":"libvirt-76"}
 len=115 ret=115 errno=0
2019-06-07 11:15:29.266+0000: 583393: debug : qemuMonitorJSONIOProcessLine:196 : Line [{"return": "Device 'drive-virtio-disk1' not found\r\n", "id": "libvirt-76"}]
2019-06-07 11:15:29.266+0000: 583393: info : qemuMonitorJSONIOProcessLine:216 : QEMU_MONITOR_RECV_REPLY: mon=0x7f085801bcf0 reply={"return": "Device 'drive-virtio-disk1' not found\r\n", "id": "libvirt-76"}
2

You can see from the logs that the actual error is:

LUKS version 2 is not supported

However, the error reported to the user is the obtuse:

Device 'drive-virtio-disk1' not found

Does this also indicate that the first error was not handled correctly?

Comment 2 Ján Tomko 2019-06-07 12:26:25 UTC
Yes, libvirt did not catch the first error as an error.

Sadly, we still use the HMP command drive_add and it did not really return an error, just a string:
{"return": "LUKS version 2 is not supported\r\n", "id": "libvirt-74"}]

We try to figure out whether the string is an error or not by strstr:
https://libvirt.org/git/?p=libvirt.git;a=blob;f=src/qemu/qemu_monitor_text.c;h=280cc588402;hb=18006c69#l65

This should be fixed once we switch to using blockdev-add via QMP.

In the meantime we can add (a substring of) this error message to qemuMonitorTextAddDrive,
but looking at all the possible errors in QEMU's crypto/block-luks.c, there does not seem to be
a reasonable substring that would catch all of them.

Comment 3 Peter Krempa 2019-11-27 09:19:53 UTC
commit 407fd434bc6101da57967dfeb62c832753481ea8
Author: Peter Krempa <pkrempa>
Date:   Thu Apr 4 17:10:27 2019 +0200

    qemu: hotplug: Use storage chain helpers in qemuDomainAttachDiskGeneric
    
    Replace the use of qemuHotplugDiskSourceAttach* helpers with
    qemuBuildStorageSourceChainAttachPrepare(Blockdev|Drive).

added support for using blockdev-add to attach disks. Since blockdev-add is a QMP command there is a straightforward and reliable way to report errors.

The blockdev feature was enabled since:

commit c6a9e54ce3252196f1fc6aa9e57537a659646d18
Author: Peter Krempa <pkrempa>
Date:   Mon Jan 7 11:45:19 2019 +0100

    qemu: enable blockdev support

    Now that all pieces are in place (hopefully) let's enable -blockdev.

    We base the capability on presence of the fix for 'auto-read-only' on
    files so that blockdev works properly, mandate that qemu supports
    explicit SCSI id strings to avoid ABI regression and that the fix for
    'savevm' is present so that internal snapshots work.

v5.9.0-390-gc6a9e54ce3

and requires upstream qemu-4.2 or appropriate downstream.

Comment 4 yisun 2019-11-29 08:38:51 UTC
reproduced with libvirt-5.9.0-4.module+el8.2.0+4836+a8e32ad7.x86_64, so qa_ack+
installed: cryptsetup-2.2.2-1.el8.x86_64


# lsscsi
[2:0:0:0]    disk    LIO-ORG  device.logical-  4.0   /dev/sda 


# cryptsetup luksFormat --type luks2 /dev/sda
...
Just follow instructions
and setup a password

# cat luks_sec.xml 
      <secret ephemeral='no' private='yes'>
         <description>LUKS Secret</description>
         <uuid>f981dd17-143f-45bc-88e6-ed1fe20ce9da</uuid>
         <usage type='volume'>
            <volume>/dev/sda</volume>
         </usage>
      </secret>


# virsh secret-define luks_sec.xml 
生成 secret f981dd17-143f-45bc-88e6-ed1fe20ce9da


# MYSECRET=`printf %s "xxxxxx_password" | base64`

# virsh secret-set-value f981dd17-143f-45bc-88e6-ed1fe20ce9da $MYSECRET

# cat disk.xml 
<disk type='block' device='disk'>
      <driver name='qemu' type='raw' cache='none' io='native' discard='unmap'/>
      <source dev='/dev/sda'/>
      <backingStore/>
      <target dev='sdb' bus='virtio'/>
      <encryption format='luks'>
          <secret type='passphrase' uuid='f981dd17-143f-45bc-88e6-ed1fe20ce9da'/>
      </encryption>
</disk>


# virsh attach-device avocado-vt-vm1 disk.xml
错误:从 disk.xml 获得设备失败
错误:internal error: unable to execute QEMU command 'device_add': Property 'virtio-blk-device.drive' can't find value 'drive-virtio-disk1'

And in libvirtd-debug.log, we see:
2019-11-29 08:34:10.576+0000: 23898: info : virObjectRef:386 : OBJECT_REF: obj=0x7fdfe40020d0
2019-11-29 08:34:10.576+0000: 23898: debug : qemuMonitorJSONIOProcessLine:221 : Line [{"return": "LUKS version 2 is not supported\r\n", "id": "libvirt-17"}]
2019-11-29 08:34:10.576+0000: 23898: debug : virJSONValueFromString:1838 : string={"return": "LUKS version 2 is not supported\r\n", "id": "libvirt-17"}

Comment 6 yisun 2019-12-12 01:39:42 UTC
Verified with libvirt-5.10.0-1.module+el8.2.0+5135+ed3b2489.x86_64

1. Prepare a luksV2 encrypted block device
[root@libvirt-rhel-8 ~]# lsscsi
[2:0:0:0]    disk    LIO-ORG  device.logical-  4.0   /dev/sda
[root@libvirt-rhel-8 ~]# cryptsetup luksFormat --type luks2 /dev/sda
WARNING!
========
This will overwrite data on /dev/sda irrevocably.
Are you sure? (Type uppercase yes): YES
Enter passphrase for /dev/sda:
Verify passphrase:

2. Prepare a libvirt secret
[root@libvirt-rhel-8 ~]# cat sec.xml
<secret ephemeral='no' private='yes'>
         <description>LUKS Secret</description>
         <uuid>f981dd17-143f-45bc-88e6-ed1fe20ce9da</uuid>
         <usage type='volume'>
            <volume>/dev/sda</volume>
         </usage>
</secret>

[root@libvirt-rhel-8 ~]# virsh secret-define sec.xml
Secret f981dd17-143f-45bc-88e6-ed1fe20ce9da created

[root@libvirt-rhel-8 ~]# MYSECRET=`printf %s "password" | base64`

[root@libvirt-rhel-8 ~]# virsh secret-set-value f981dd17-143f-45bc-88e6-ed1fe20ce9da $MYSECRET
Secret value set

3. Prepare a disk xml
[root@libvirt-rhel-8 ~]# cat disk.xml
<disk type='block' device='disk'>
      <driver name='qemu' type='raw' cache='none' io='native' discard='unmap'/>
      <source dev='/dev/sda'/>
      <backingStore/>
      <target dev='sdb' bus='virtio'/>
      <encryption format='luks'>
          <secret type='passphrase' uuid='f981dd17-143f-45bc-88e6-ed1fe20ce9da'/>
      </encryption>
</disk>

4. Do disk hot plug, correct error generated.
[root@libvirt-rhel-8 ~]# virsh attach-device avocado-vt-vm1 disk.xml
error: Failed to attach device from disk.xml
error: internal error: unable to execute QEMU command 'blockdev-add': LUKS version 2 is not supported

Comment 7 yisun 2019-12-12 08:46:49 UTC
And cold plug also generated the correct error message
[root@libvirt-rhel-8 ~]# virsh attach-device avocado-vt-vm1 disk.xml --config
Device attached successfully

[root@libvirt-rhel-8 ~]# virsh destroy avocado-vt-vm1
Domain avocado-vt-vm1 destroyed

[root@libvirt-rhel-8 ~]# virsh start avocado-vt-vm1
error: Failed to start domain avocado-vt-vm1
error: internal error: process exited while connecting to monitor: 2019-12-12T08:41:28.762663Z qemu-kvm: -blockdev {"node-name":"libvirt-1-format","read-only":false,"discard":"unmap","cache":{"direct":true,"no-flush":false},"driver":"luks","key-secret":"libvirt-1-format-luks-secret0","file":"libvirt-1-storage"}: LUKS version 2 is not supported

Comment 9 errata-xmlrpc 2020-05-05 09:46:14 UTC
Since the problem described in this bug report should be
resolved in a recent advisory, it has been closed with a
resolution of ERRATA.

For information on the advisory, and where to find the updated
files, follow the link below.

If the solution does not work for you, open a new bug report.

https://access.redhat.com/errata/RHBA-2020:2017


Note You need to log in before you can comment on or make changes to this bug.