mirror of
https://gitlab.com/qemu-project/qemu
synced 2024-10-02 17:24:31 +00:00
dma-helpers: ensure AIO callback is invoked after cancellation
dma_aio_cancel unschedules the BH if there is one, which corresponds
to the reschedule_dma case of dma_blk_cb. This can stall the DMA
permanently, because dma_complete will never get invoked and therefore
nobody will ever invoke the original AIO callback in dbs->common.cb.
Fix this by invoking the callback (which is ensured to happen after
a bdrv_aio_cancel_async, or done manually in the dbs->bh case), and
add assertions to check that the DMA state machine is indeed waiting
for dma_complete or reschedule_dma, but never both.
Reported-by: John Snow <jsnow@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20190729213416.1972-1-pbonzini@redhat.com
Signed-off-by: John Snow <jsnow@redhat.com>
(cherry picked from commit 539343c0a4)
Signed-off-by: Michael Roth <mdroth@linux.vnet.ibm.com>
This commit is contained in:
parent
9e06029aea
commit
fbde196c30
|
@ -90,6 +90,7 @@ static void reschedule_dma(void *opaque)
|
||||||
{
|
{
|
||||||
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
|
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
|
||||||
|
|
||||||
|
assert(!dbs->acb && dbs->bh);
|
||||||
qemu_bh_delete(dbs->bh);
|
qemu_bh_delete(dbs->bh);
|
||||||
dbs->bh = NULL;
|
dbs->bh = NULL;
|
||||||
dma_blk_cb(dbs, 0);
|
dma_blk_cb(dbs, 0);
|
||||||
|
@ -111,15 +112,12 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
|
||||||
{
|
{
|
||||||
trace_dma_complete(dbs, ret, dbs->common.cb);
|
trace_dma_complete(dbs, ret, dbs->common.cb);
|
||||||
|
|
||||||
|
assert(!dbs->acb && !dbs->bh);
|
||||||
dma_blk_unmap(dbs);
|
dma_blk_unmap(dbs);
|
||||||
if (dbs->common.cb) {
|
if (dbs->common.cb) {
|
||||||
dbs->common.cb(dbs->common.opaque, ret);
|
dbs->common.cb(dbs->common.opaque, ret);
|
||||||
}
|
}
|
||||||
qemu_iovec_destroy(&dbs->iov);
|
qemu_iovec_destroy(&dbs->iov);
|
||||||
if (dbs->bh) {
|
|
||||||
qemu_bh_delete(dbs->bh);
|
|
||||||
dbs->bh = NULL;
|
|
||||||
}
|
|
||||||
qemu_aio_unref(dbs);
|
qemu_aio_unref(dbs);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -179,14 +177,21 @@ static void dma_aio_cancel(BlockAIOCB *acb)
|
||||||
|
|
||||||
trace_dma_aio_cancel(dbs);
|
trace_dma_aio_cancel(dbs);
|
||||||
|
|
||||||
|
assert(!(dbs->acb && dbs->bh));
|
||||||
if (dbs->acb) {
|
if (dbs->acb) {
|
||||||
|
/* This will invoke dma_blk_cb. */
|
||||||
blk_aio_cancel_async(dbs->acb);
|
blk_aio_cancel_async(dbs->acb);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dbs->bh) {
|
if (dbs->bh) {
|
||||||
cpu_unregister_map_client(dbs->bh);
|
cpu_unregister_map_client(dbs->bh);
|
||||||
qemu_bh_delete(dbs->bh);
|
qemu_bh_delete(dbs->bh);
|
||||||
dbs->bh = NULL;
|
dbs->bh = NULL;
|
||||||
}
|
}
|
||||||
|
if (dbs->common.cb) {
|
||||||
|
dbs->common.cb(dbs->common.opaque, -ECANCELED);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static AioContext *dma_get_aio_context(BlockAIOCB *acb)
|
static AioContext *dma_get_aio_context(BlockAIOCB *acb)
|
||||||
|
|
Loading…
Reference in a new issue