aio-posix: partially inline aio_dispatch into aio_poll

This patch prepares for the removal of unnecessary lockcnt inc/dec pairs.
Extract the dispatching loop for file descriptor handlers into a new
function aio_dispatch_handlers, and then inline aio_dispatch into
aio_poll.

aio_dispatch can now become void.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170213135235.12274-17-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit a153bf52b3 (parent b9e413dd37)
Paolo Bonzini, 2017-02-13 14:52:33 +01:00; committed by Stefan Hajnoczi
4 changed files with 20 additions and 45 deletions

@@ -310,12 +310,8 @@ bool aio_pending(AioContext *ctx);
 /* Dispatch any pending callbacks from the GSource attached to the AioContext.
  *
  * This is used internally in the implementation of the GSource.
- *
- * @dispatch_fds: true to process fds, false to skip them
- *                (can be used as an optimization by callers that know there
- *                are no fds ready)
  */
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
+void aio_dispatch(AioContext *ctx);
 
 /* Progress in completing AIO work to occur. This can issue new pending
  * aio as a result of executing I/O completion or bh callbacks.

@@ -386,12 +386,6 @@ static bool aio_dispatch_handlers(AioContext *ctx)
     AioHandler *node, *tmp;
     bool progress = false;
 
-    /*
-     * We have to walk very carefully in case aio_set_fd_handler is
-     * called while we're walking.
-     */
-    qemu_lockcnt_inc(&ctx->list_lock);
-
     QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents;
 
@@ -426,33 +420,18 @@ static bool aio_dispatch_handlers(AioContext *ctx)
         }
     }
 
-    qemu_lockcnt_dec(&ctx->list_lock);
     return progress;
 }
 
-/*
- * Note that dispatch_fds == false has the side-effect of post-poning the
- * freeing of deleted handlers.
- */
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
 {
-    bool progress;
-
-    /*
-     * If there are callbacks left that have been queued, we need to call them.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    progress = aio_bh_poll(ctx);
-
-    if (dispatch_fds) {
-        progress |= aio_dispatch_handlers(ctx);
-    }
-
-    /* Run our timers */
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
-
-    return progress;
+    aio_bh_poll(ctx);
+
+    qemu_lockcnt_inc(&ctx->list_lock);
+    aio_dispatch_handlers(ctx);
+    qemu_lockcnt_dec(&ctx->list_lock);
+
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 /* These thread-local variables are used only in a small part of aio_poll
@@ -702,11 +681,16 @@ bool aio_poll(AioContext *ctx, bool blocking)
     npfd = 0;
     qemu_lockcnt_dec(&ctx->list_lock);
 
-    /* Run dispatch even if there were no readable fds to run timers */
-    if (aio_dispatch(ctx, ret > 0)) {
-        progress = true;
+    progress |= aio_bh_poll(ctx);
+
+    if (ret > 0) {
+        qemu_lockcnt_inc(&ctx->list_lock);
+        progress |= aio_dispatch_handlers(ctx);
+        qemu_lockcnt_dec(&ctx->list_lock);
     }
 
+    progress |= timerlistgroup_run_timers(&ctx->tlg);
+
     return progress;
 }
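
For readability, here is how the POSIX dispatch path reads once the hunks above are applied: a condensed sketch assembled from the added lines, not the complete file. aio_dispatch_handlers() keeps its existing body; only the list_lock lockcnt inc/dec moves to its callers.

void aio_dispatch(AioContext *ctx)
{
    /* Bottom halves first, then fd handlers (now under the caller's lockcnt),
     * then timers; the old dispatch_fds flag and the bool return are gone. */
    aio_bh_poll(ctx);

    qemu_lockcnt_inc(&ctx->list_lock);
    aio_dispatch_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

aio_poll() now performs the same three steps inline, but it skips the handler walk when the preceding poll reported no ready fds (the ret > 0 check), which is exactly what the old dispatch_fds argument used to express.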

@@ -309,16 +309,11 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     return progress;
 }
 
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
 {
-    bool progress;
-
-    progress = aio_bh_poll(ctx);
-    if (dispatch_fds) {
-        progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
-    }
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
-    return progress;
+    aio_bh_poll(ctx);
+    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 bool aio_poll(AioContext *ctx, bool blocking)

@@ -258,7 +258,7 @@ aio_ctx_dispatch(GSource *source,
     AioContext *ctx = (AioContext *) source;
 
     assert(callback == NULL);
-    aio_dispatch(ctx, true);
+    aio_dispatch(ctx);
    return true;
 }
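
As a usage note, the GSource dispatch callback above was the only caller that passed dispatch_fds == true unconditionally, so after this patch it simply calls the void aio_dispatch(). The sketch below fills in the surrounding function; the parameters beyond the GSource *source shown in the hunk header are assumed from the standard GSourceFuncs dispatch prototype.

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,   /* assumed: standard GSourceFuncs signature */
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);   /* always dispatches fds now; no dispatch_fds flag */
    return true;
}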