Description of problem:
QEMU coredumps when taking a snapshot in transaction mode and one of the snapshot paths does not exist.

Version-Release number of selected component (if applicable):
kernel version: 4.18.0-147.el8.x86_64
qemu-kvm version: qemu-kvm-4.2.0-1.module+el8.2.0+4793+b09dd2fb.x86_64

How reproducible:
100%

Steps to Reproduce:
1. Start the guest with the following qemu command line:
/usr/libexec/qemu-kvm \
    -name 'avocado-vt-vm1' \
    -machine q35 \
    -nodefaults \
    -device VGA,bus=pcie.0,addr=0x1 \
    -m 7168 \
    -smp 4,maxcpus=4,cores=2,threads=1,dies=1,sockets=2 \
    -cpu 'Skylake-Client',+kvm_pv_unhalt \
    -chardev socket,id=qmp_id_qmpmonitor1,path=/var/tmp/monitor-qmpmonitor1-20191118-011823-gEG3j1ms,server,nowait \
    -mon chardev=qmp_id_qmpmonitor1,mode=control \
    -chardev socket,id=qmp_id_catch_monitor,path=/var/tmp/monitor-catch_monitor-20191118-011823-gEG3j1mt,server,nowait \
    -mon chardev=qmp_id_catch_monitor,mode=control \
    -device pvpanic,ioport=0x505,id=id4p8G4l \
    -chardev socket,server,id=chardev_serial0,path=/var/tmp/serial-serial0-20191118-011823-gEG3j1mt,nowait \
    -device isa-serial,id=serial0,chardev=chardev_serial0 \
    -chardev socket,id=seabioslog_id_20191118-011823-gEG3j1mt,path=/var/tmp/seabios-20191118-011823-gEG3j1mt,server,nowait \
    -device isa-debugcon,chardev=seabioslog_id_20191118-011823-gEG3j1mt,iobase=0x402 \
    -device pcie-root-port,id=pcie.0-root-port-2,slot=2,chassis=2,addr=0x2,bus=pcie.0 \
    -device qemu-xhci,id=usb1,bus=pcie.0-root-port-2,addr=0x0 \
    -object iothread,id=iothread0 \
    -drive id=drive_image1,if=none,snapshot=off,aio=threads,cache=none,format=qcow2,file=/mnt/nfs/rhel820-64-virtio.qcow2 \
    -device pcie-root-port,id=pcie.0-root-port-3,slot=3,chassis=3,addr=0x3,bus=pcie.0 \
    -device virtio-scsi-pci,id=scsi0,bus=pcie.0-root-port-3,addr=0x0,iothread=iothread0 \
    -device scsi-hd,id=image1,drive=drive_image1,bootindex=0,bus=scsi0.0 \
    -drive id=drive_data1,if=none,snapshot=off,aio=threads,cache=none,format=qcow2,file=/mnt/nfs/data.qcow2 \
    -device pcie-root-port,id=pcie.0-root-port-6,slot=6,chassis=6,addr=0x6,bus=pcie.0 \
    -device virtio-blk-pci,id=data1,drive=drive_data1,bus=pcie.0-root-port-6,addr=0x0,iothread=iothread0 \
    -device pcie-root-port,id=pcie.0-root-port-4,slot=4,chassis=4,addr=0x4,bus=pcie.0 \
    -device virtio-net-pci,mac=9a:4f:f4:e5:bd:67,id=idkQvhgf,netdev=idnMcj5J,bus=pcie.0-root-port-4,addr=0x0 \
    -netdev tap,id=idnMcj5J,vhost=on \
    -device usb-tablet,id=usb-tablet1,bus=usb1.0,port=1 \
    -vnc :0 \
    -rtc base=utc,clock=host,driftfix=slew \
    -boot order=cdn,once=c,menu=off,strict=off \
    -enable-kvm \
    -device pcie-root-port,id=pcie_extra_root_port_0,slot=5,chassis=5,addr=0x5,bus=pcie.0 \
    -monitor stdio \

2. Take a snapshot in transaction mode with one snapshot path that does not exist:
{"execute": "transaction",
 "arguments": {"actions": [
   {"type": "blockdev-snapshot-sync", "data": {"device": "drive_image1", "snapshot-file": "sn1", "mode": "absolute-paths", "format": "qcow2"}},
   {"type": "blockdev-snapshot-sync", "data": {"device": "drive_data1", "snapshot-file": "/aa/sn1", "mode": "absolute-paths", "format": "qcow2"}}]}}
Ncat: Connection reset by peer.

Actual results:
QEMU coredumps with:
qemu-kvm: block.c:2240: bdrv_replace_child_noperm: Assertion `bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs)' failed.
test.txt: line 38: 23593 Aborted (core dumped) /usr/libexec/qemu-kvm -name 'avocado-vt-vm1' -machine q35 -nodefaults -device VGA,bus=pcie.0,addr=0x1 -m 7168 -smp 4,maxcpus=4,cores=2,threads=1,dies=1,sockets=2 -cpu 'Skylake-Client',+kvm_pv_unhalt -chardev socket,id=qmp_id_qmpmonitor1,path=/var/tmp/monitor-qmpmonitor1-20191118-011823-gEG3j1ms,server,nowait -mon chardev=qmp_id_qmpmonitor1,mode=control ...

coredump info:
(gdb) bt
#0  0x00007fa1048b28df in __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:50
#1  0x00007fa10489ccf5 in __GI_abort () at abort.c:79
#2  0x00007fa10489cbc9 in __assert_fail_base (fmt=0x7fa104a03300 "%s%s%s:%u: %s%sAssertion `%s' failed.\n%n", assertion=0x5572240b44d8 "bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs)", file=0x557224014d30 "block.c", line=2240, function=<optimized out>) at assert.c:92
#3  0x00007fa1048aae96 in __GI___assert_fail (assertion=assertion@entry=0x5572240b44d8 "bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs)", file=file@entry=0x557224014d30 "block.c", line=line@entry=2240, function=function@entry=0x5572240b5d60 <__PRETTY_FUNCTION__.31620> "bdrv_replace_child_noperm") at assert.c:101
#4  0x0000557223e631f8 in bdrv_replace_child_noperm (child=0x557225b9c980, new_bs=new_bs@entry=0x557225c42e40) at block.c:2240
#5  0x0000557223e68be7 in bdrv_replace_node (from=0x557226951a60, to=0x557225c42e40, errp=0x5572247d6138 <error_abort>) at block.c:4196
#6  0x0000557223d069c4 in external_snapshot_abort (common=0x557225d7e170) at blockdev.c:1731
#7  0x0000557223d069c4 in external_snapshot_abort (common=0x557225d7e170) at blockdev.c:1717
#8  0x0000557223d09013 in qmp_transaction (dev_list=<optimized out>, has_props=<optimized out>, props=0x557225cc7d70, errp=errp@entry=0x7ffe704c0c98) at blockdev.c:2360
#9  0x0000557223e32085 in qmp_marshal_transaction (args=<optimized out>, ret=<optimized out>, errp=0x7ffe704c0d08) at qapi/qapi-commands-transaction.c:44
#10 0x0000557223ee798c in do_qmp_dispatch (errp=0x7ffe704c0d00, allow_oob=<optimized out>, request=<optimized out>, cmds=0x5572247d3cc0 <qmp_commands>) at qapi/qmp-dispatch.c:132
#11 0x0000557223ee798c in qmp_dispatch (cmds=0x5572247d3cc0 <qmp_commands>, request=<optimized out>, allow_oob=<optimized out>) at qapi/qmp-dispatch.c:175
#12 0x0000557223e06141 in monitor_qmp_dispatch (mon=0x557225c69ff0, req=<optimized out>) at monitor/qmp.c:120
#13 0x0000557223e0678a in monitor_qmp_bh_dispatcher (data=<optimized out>) at monitor/qmp.c:209
#14 0x0000557223f2f366 in aio_bh_call (bh=0x557225b9dc60) at util/async.c:117
#15 0x0000557223f2f366 in aio_bh_poll (ctx=ctx@entry=0x557225b9c840) at util/async.c:117
#16 0x0000557223f32754 in aio_dispatch (ctx=0x557225b9c840) at util/aio-posix.c:459
#17 0x0000557223f2f242 in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at util/async.c:260
#18 0x00007fa10913467d in g_main_dispatch (context=0x557225c28e80) at gmain.c:3176
#19 0x00007fa10913467d in g_main_context_dispatch (context=context@entry=0x557225c28e80) at gmain.c:3829
#20 0x0000557223f31808 in glib_pollfds_poll () at util/main-loop.c:219
#21 0x0000557223f31808 in os_host_main_loop_wait (timeout=<optimized out>) at util/main-loop.c:242
#22 0x0000557223f31808 in main_loop_wait (nonblocking=<optimized out>) at util/main-loop.c:518
#23 0x0000557223d13201 in main_loop () at vl.c:1828
#24 0x0000557223bbfb82 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4504

Expected results:
The snapshot transaction fails with an error; QEMU does not crash.
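For illustration only, a clean failure would mean the transaction returns a QMP error for the nonexistent path and the guest keeps running, roughly like the reply below (the exact error class and text are an assumption, not taken from a run):
{"error": {"class": "GenericError", "desc": "Could not create '/aa/sn1': No such file or directory"}}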
Additional info:
Will attach the coredump file later.
Additional info:
When data-plane is disabled, this issue is not hit. A sketch of the assumed no-data-plane configuration follows.
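"Disable data-plane" is assumed to mean dropping the iothread from the original command line, i.e. removing -object iothread,id=iothread0 and the iothread=iothread0 property from the virtio-scsi-pci/virtio-blk-pci devices, for example:
    -drive id=drive_data1,if=none,snapshot=off,aio=threads,cache=none,format=qcow2,file=/mnt/nfs/data.qcow2 \
    -device virtio-blk-pci,id=data1,drive=drive_data1,bus=pcie.0-root-port-6,addr=0x0 \
This is an assumption about the retest configuration, not a logged command line.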
Tested with -blockdev; it works OK.
1. Start the guest with qemu cmds:
...
    -blockdev driver=file,node-name=file_node,filename=/home/kvm_autotest_root/images/rhel820-64-virtio.qcow2 \
    -blockdev driver=qcow2,file=file_node,node-name=drive_image1 \
    -device pcie-root-port,id=pcie.0-root-port-5,slot=5,chassis=5,addr=0x5,bus=pcie.0 \
    -device virtio-blk-pci,id=image1,drive=drive_image1,bus=pcie.0-root-port-5,addr=0x0 \
    -blockdev driver=file,node-name=data_node,filename=/home/data.qcow2 \
    -blockdev driver=qcow2,file=data_node,node-name=drive_data1 \
    -device pcie-root-port,id=pcie.0-root-port-6,slot=6,chassis=6,addr=0x6,bus=pcie.0 \
    -device virtio-blk-pci,id=data1,drive=drive_data1,bus=pcie.0-root-port-6,addr=0x0 \
...

2. Create the full backup target sn1:
{'execute':'blockdev-create','arguments':{'options':{'driver':'file','filename':'/root/sn1','size':2147483648},'job-id':'job1'}}
{'execute':'blockdev-add','arguments':{'driver':'file','node-name':'drive_sn1','filename':'/root/sn1'}}
{'execute':'blockdev-create','arguments':{'options':{'driver':'qcow2','file':'drive_sn1','size':2147483648},'job-id':'job2'}}
{'execute':'blockdev-add','arguments':{'driver':'qcow2','node-name':'sn1','file':'drive_sn1'}}
{'execute':'job-dismiss','arguments':{'id':'job1'}}
{'execute':'job-dismiss','arguments':{'id':'job2'}}

3. Do a full backup of multiple disks in one transaction:
{"execute": "transaction",
 "arguments": {"actions": [
   {"type": "blockdev-backup", "data": {"device": "drive_data1", "target": "sn1", "sync": "full", "job-id": "j1"}},
   {"type": "blockdev-backup", "data": {"device": "drive_image1", "target": "sn3", "sync": "full", "job-id": "j2"}}]}}

{"timestamp": {"seconds": 1575438781, "microseconds": 160254}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "j1"}}
{"timestamp": {"seconds": 1575438781, "microseconds": 160297}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "j1"}}
{"timestamp": {"seconds": 1575438781, "microseconds": 160323}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "j1"}}
{"timestamp": {"seconds": 1575438781, "microseconds": 160346}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "j1"}}
{"error": {"class": "GenericError", "desc": "Cannot find device=sn3 nor node_name=sn3"}}

The second action references target node "sn3", which was never added in step 2, so the whole transaction is rolled back cleanly: job j1 is aborted and an error is returned, and QEMU does not crash.
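For completeness, a sketch (not taken from the logged run above, only an illustration of how one could retest the exact snapshot scenario under -blockdev) of the original failing transaction expressed against the node names defined by -blockdev, using blockdev-snapshot-sync with node-name instead of device:
{"execute": "transaction",
 "arguments": {"actions": [
   {"type": "blockdev-snapshot-sync", "data": {"node-name": "drive_image1", "snapshot-file": "sn1", "mode": "absolute-paths", "format": "qcow2"}},
   {"type": "blockdev-snapshot-sync", "data": {"node-name": "drive_data1", "snapshot-file": "/aa/sn1", "mode": "absolute-paths", "format": "qcow2"}}]}}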