block: change drain to look only at one child at a time

bdrv_requests_pending is checking children to also wait until internal
requests (such as metadata writes) have completed.  However, checking
children is in general overkill.  Children requests can be of two kinds:

- requests caused by an operation on bs, e.g. a bdrv_aio_write to bs
causing a write to bs->file->bs.  In this case, the parent's in_flight
count will always be incremented by at least one for every request in
the child.

- asynchronous metadata writes or flushes.  Such writes can be started
even if bs's in_flight count is zero, but not after the .bdrv_drain
callback has been invoked.

This patch therefore changes bdrv_drain to finish I/O in the parent
(after which the parent's in_flight will be locked to zero), call
bdrv_drain (after which the parent will not generate I/O on the child
anymore), and then wait for internal I/O in the children to complete.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-Id: <1477565348-5458-6-git-send-email-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
This commit is contained in:
Paolo Bonzini 2016-10-27 12:48:53 +02:00 committed by Fam Zheng
parent 9972354856
commit d42cf28837

View file

@@ -156,16 +156,33 @@ bool bdrv_requests_pending(BlockDriverState *bs)
return false;
}
static void bdrv_drain_recurse(BlockDriverState *bs)
/* Run the AioContext event loop until bs's in_flight request count drops
 * to zero.  Returns true iff at least one aio_poll() iteration was needed,
 * i.e. some request was still pending when we were called — callers use
 * this to know whether any progress had to be made. */
static bool bdrv_drain_poll(BlockDriverState *bs)
{
bool waited = false;
/* Blocking aio_poll() lets pending requests complete; each completion
 * decrements bs->in_flight, so this loop terminates once the drain
 * section (quiesce) stops new requests from being submitted. */
while (atomic_read(&bs->in_flight) > 0) {
aio_poll(bdrv_get_aio_context(bs), true);
waited = true;
}
return waited;
}
static bool bdrv_drain_recurse(BlockDriverState *bs)
{
BdrvChild *child;
bool waited;
waited = bdrv_drain_poll(bs);
if (bs->drv && bs->drv->bdrv_drain) {
bs->drv->bdrv_drain(bs);
}
QLIST_FOREACH(child, &bs->children, next) {
bdrv_drain_recurse(child->bs);
waited |= bdrv_drain_recurse(child->bs);
}
return waited;
}
typedef struct {
@@ -174,14 +191,6 @@ typedef struct {
bool done;
} BdrvCoDrainData;
static void bdrv_drain_poll(BlockDriverState *bs)
{
while (bdrv_requests_pending(bs)) {
/* Keep iterating */
aio_poll(bdrv_get_aio_context(bs), true);
}
}
static void bdrv_co_drain_bh_cb(void *opaque)
{
BdrvCoDrainData *data = opaque;
@@ -189,7 +198,7 @@ static void bdrv_co_drain_bh_cb(void *opaque)
BlockDriverState *bs = data->bs;
bdrv_dec_in_flight(bs);
bdrv_drain_poll(bs);
bdrv_drained_begin(bs);
data->done = true;
qemu_coroutine_enter(co);
}
@@ -220,6 +229,11 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
void bdrv_drained_begin(BlockDriverState *bs)
{
if (qemu_in_coroutine()) {
bdrv_co_yield_to_drain(bs);
return;
}
if (!bs->quiesce_counter++) {
aio_disable_external(bdrv_get_aio_context(bs));
bdrv_parent_drained_begin(bs);
@@ -227,11 +241,6 @@ void bdrv_drained_begin(BlockDriverState *bs)
bdrv_io_unplugged_begin(bs);
bdrv_drain_recurse(bs);
if (qemu_in_coroutine()) {
bdrv_co_yield_to_drain(bs);
} else {
bdrv_drain_poll(bs);
}
bdrv_io_unplugged_end(bs);
}
@@ -299,7 +308,6 @@ void bdrv_drain_all(void)
aio_context_acquire(aio_context);
bdrv_parent_drained_begin(bs);
bdrv_io_unplugged_begin(bs);
bdrv_drain_recurse(bs);
aio_context_release(aio_context);
if (!g_slist_find(aio_ctxs, aio_context)) {
@@ -322,10 +330,7 @@ void bdrv_drain_all(void)
aio_context_acquire(aio_context);
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
if (aio_context == bdrv_get_aio_context(bs)) {
if (bdrv_requests_pending(bs)) {
aio_poll(aio_context, true);
waited = true;
}
waited |= bdrv_drain_recurse(bs);
}
}
aio_context_release(aio_context);