xen-block: implement BlockDevOps->drained_begin()

Detach event channels during drained sections to stop I/O submission
from the ring. xen-block is no longer reliant on aio_disable_external()
after this patch. This will allow us to remove the
aio_disable_external() API once all other code that relies on it is
converted.

Extend xen_device_set_event_channel_context() to allow ctx=NULL. The
event channel still exists but the event loop does not monitor the file
descriptor. Event channel processing can resume by calling
xen_device_set_event_channel_context() with a non-NULL ctx.
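
As an illustration, the calling pattern this enables looks roughly like
the sketch below (xendev, channel, and ctx stand in for a real
XenDevice, XenEventChannel, and AioContext; this is not code from the
patch itself):

    /* detach: the channel stays bound, but its fd is no longer monitored */
    xen_device_set_event_channel_context(xendev, channel, NULL, &error_abort);

    /* ... no ring processing happens while detached ... */

    /* attach: resume fd monitoring and request processing in ctx */
    xen_device_set_event_channel_context(xendev, channel, ctx, &error_abort);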

Factor out xen_device_set_event_channel_context() calls in
hw/block/dataplane/xen-block.c into attach/detach helper functions.
Incidentally, these don't require the AioContext lock because
aio_set_fd_handler() is thread-safe.
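
xen_block_dataplane_start/stop() call the helpers only when no drained
section is active, so attach/detach stays balanced with the
.drained_begin/end() callbacks (abridged from the diff below):

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_attach(dataplane);
    }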

It's safer to register BlockDevOps after the dataplane instance has been
created. The BlockDevOps .drained_begin/end() callbacks depend on the
dataplane instance, so move the blk_set_dev_ops() call after
xen_block_dataplane_create().
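
The resulting order in xen_block_realize() is (abridged from the diff
below):

    blockdev->dataplane =
        xen_block_dataplane_create(xendev, blk, conf->logical_block_size,
                                   blockdev->props.iothread);

    /* only now can .drained_begin/end() safely use blockdev->dataplane */
    blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev);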

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230516190238.8401-12-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>

--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -664,6 +664,30 @@ void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
     g_free(dataplane);
 }
 
+void xen_block_dataplane_detach(XenBlockDataPlane *dataplane)
+{
+    if (!dataplane || !dataplane->event_channel) {
+        return;
+    }
+
+    /* Only reason for failure is a NULL channel */
+    xen_device_set_event_channel_context(dataplane->xendev,
+                                         dataplane->event_channel,
+                                         NULL, &error_abort);
+}
+
+void xen_block_dataplane_attach(XenBlockDataPlane *dataplane)
+{
+    if (!dataplane || !dataplane->event_channel) {
+        return;
+    }
+
+    /* Only reason for failure is a NULL channel */
+    xen_device_set_event_channel_context(dataplane->xendev,
+                                         dataplane->event_channel,
+                                         dataplane->ctx, &error_abort);
+}
+
 void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
 {
     XenDevice *xendev;
@@ -674,13 +698,11 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
 
     xendev = dataplane->xendev;
 
-    aio_context_acquire(dataplane->ctx);
-    if (dataplane->event_channel) {
-        /* Only reason for failure is a NULL channel */
-        xen_device_set_event_channel_context(xendev, dataplane->event_channel,
-                                             qemu_get_aio_context(),
-                                             &error_abort);
+    if (!blk_in_drain(dataplane->blk)) {
+        xen_block_dataplane_detach(dataplane);
     }
+
+    aio_context_acquire(dataplane->ctx);
     /* Xen doesn't have multiple users for nodes, so this can't fail */
     blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(dataplane->ctx);
@@ -819,11 +841,9 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
     blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
     aio_context_release(old_context);
 
-    /* Only reason for failure is a NULL channel */
-    aio_context_acquire(dataplane->ctx);
-    xen_device_set_event_channel_context(xendev, dataplane->event_channel,
-                                         dataplane->ctx, &error_abort);
-    aio_context_release(dataplane->ctx);
+    if (!blk_in_drain(dataplane->blk)) {
+        xen_block_dataplane_attach(dataplane);
+    }
 
     return;

--- a/hw/block/dataplane/xen-block.h
+++ b/hw/block/dataplane/xen-block.h
@@ -26,5 +26,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                                unsigned int protocol,
                                Error **errp);
 void xen_block_dataplane_stop(XenBlockDataPlane *dataplane);
+void xen_block_dataplane_attach(XenBlockDataPlane *dataplane);
+void xen_block_dataplane_detach(XenBlockDataPlane *dataplane);
 
 #endif /* HW_BLOCK_DATAPLANE_XEN_BLOCK_H */

--- a/hw/block/xen-block.c
+++ b/hw/block/xen-block.c
@@ -189,8 +189,26 @@ static void xen_block_resize_cb(void *opaque)
     xen_device_backend_printf(xendev, "state", "%u", state);
 }
 
+/* Suspend request handling */
+static void xen_block_drained_begin(void *opaque)
+{
+    XenBlockDevice *blockdev = opaque;
+
+    xen_block_dataplane_detach(blockdev->dataplane);
+}
+
+/* Resume request handling */
+static void xen_block_drained_end(void *opaque)
+{
+    XenBlockDevice *blockdev = opaque;
+
+    xen_block_dataplane_attach(blockdev->dataplane);
+}
+
 static const BlockDevOps xen_block_dev_ops = {
-    .resize_cb = xen_block_resize_cb,
+    .resize_cb     = xen_block_resize_cb,
+    .drained_begin = xen_block_drained_begin,
+    .drained_end   = xen_block_drained_end,
 };
 
 static void xen_block_realize(XenDevice *xendev, Error **errp)
@@ -242,8 +260,6 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
         return;
     }
 
-    blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev);
-
     if (conf->discard_granularity == -1) {
         conf->discard_granularity = conf->physical_block_size;
     }
@@ -277,6 +293,8 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
     blockdev->dataplane =
         xen_block_dataplane_create(xendev, blk, conf->logical_block_size,
                                    blockdev->props.iothread);
+
+    blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev);
 }
 
 static void xen_block_frontend_changed(XenDevice *xendev,
static void xen_block_frontend_changed(XenDevice *xendev,

--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -846,8 +846,11 @@ void xen_device_set_event_channel_context(XenDevice *xendev,
                            NULL, NULL, NULL, NULL, NULL);
 
     channel->ctx = ctx;
-    aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh), true,
-                       xen_device_event, NULL, xen_device_poll, NULL, channel);
+    if (ctx) {
+        aio_set_fd_handler(channel->ctx, qemu_xen_evtchn_fd(channel->xeh),
+                           true, xen_device_event, NULL, xen_device_poll, NULL,
+                           channel);
+    }
 }
 
 XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,