/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "sysemu/iothread.h"
#include "qmp-commands.h"
#include "qemu/error-report.h"

#define IOTHREADS_PATH "/objects"
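
/* IOThread adds no class-specific state, so a plain ObjectClass suffices */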
typedef ObjectClass IOThreadClass;

#define IOTHREAD_GET_CLASS(obj) \
   OBJECT_GET_CLASS(IOThreadClass, obj, TYPE_IOTHREAD)
#define IOTHREAD_CLASS(klass) \
   OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD)

static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;
    bool blocking;

    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
    qemu_cond_signal(&iothread->init_done_cond);
    qemu_mutex_unlock(&iothread->init_done_lock);

    while (!iothread->stopping) {
        aio_context_acquire(iothread->ctx);
        blocking = true;
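        /*
         * Poll once with blocking, then keep polling without blocking for
         * as long as progress is made.  This mirrors how vl.c invokes
         * main_loop_wait() and ensures the AioContext is released between
         * wakeups rather than only once it is fully quiescent; relying on
         * the latter led to hangs when stopping the dataplane thread.
         */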
        while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
            /* Progress was made, keep going */
            blocking = false;
        }
        aio_context_release(iothread->ctx);
    }
    return NULL;
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    if (!iothread->ctx) {
        return;
    }
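
    /* Stop the thread: wake it from aio_poll() and wait for it to exit */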
    iothread->stopping = true;
    aio_notify(iothread->ctx);
    qemu_thread_join(&iothread->thread);
    qemu_cond_destroy(&iothread->init_done_cond);
    qemu_mutex_destroy(&iothread->init_done_lock);
    aio_context_unref(iothread->ctx);
}

static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);

    iothread->stopping = false;
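    /* Overwritten by iothread_run() with the real ID; -1 means not started */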
    iothread->thread_id = -1;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    qemu_mutex_init(&iothread->init_done_lock);
    qemu_cond_init(&iothread->init_done_cond);

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    qemu_thread_create(&iothread->thread, "iothread", iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);

    /* Wait for initialization to complete */
    qemu_mutex_lock(&iothread->init_done_lock);
    while (iothread->thread_id == -1) {
        qemu_cond_wait(&iothread->init_done_cond,
                       &iothread->init_done_lock);
    }
    qemu_mutex_unlock(&iothread->init_done_lock);
}

static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    ucc->complete = iothread_complete;
}

static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_OBJECT,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_finalize = iothread_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        {TYPE_USER_CREATABLE},
        {}
    },
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)
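
/*
 * IOThreads are user-creatable objects; a typical (illustrative) use is
 * "-object iothread,id=iothread0" on the QEMU command line, which places
 * the object under /objects where iothread_find() can look it up by id.
 */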
IOThread *iothread_find(const char *id)
{
    Object *container = container_get(object_get_root(), IOTHREADS_PATH);
    Object *child;

    child = object_property_get_link(container, id, NULL);
    if (!child) {
        return NULL;
    }
    return (IOThread *)object_dynamic_cast(child, TYPE_IOTHREAD);
}

char *iothread_get_id(IOThread *iothread)
{
    return object_get_canonical_path_component(OBJECT(iothread));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}
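
/*
 * Helper for qmp_query_iothreads(): visit one child of /objects and, if it
 * is an IOThread, append its info to the tail of the list that @opaque
 * points at.
 */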
static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***prev = opaque;
    IOThreadInfoList *elem;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;

    elem = g_new0(IOThreadInfoList, 1);
    elem->value = info;
    elem->next = NULL;

    **prev = elem;
    *prev = &elem->next;
    return 0;
}
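
/*
 * Implements the "query-iothreads" QMP command.  An example exchange
 * (values are illustrative):
 *   -> { "execute": "query-iothreads" }
 *   <- { "return": [ { "id": "iothread0", "thread-id": 3134 } ] }
 */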
IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = container_get(object_get_root(), IOTHREADS_PATH);

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}