qemu/iothread.c


/*
* Event loop thread
*
* Copyright Red Hat Inc., 2013
*
* Authors:
* Stefan Hajnoczi <stefanha@redhat.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
#include "sysemu/iothread.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"
typedef ObjectClass IOThreadClass;
#define IOTHREAD_GET_CLASS(obj) \
OBJECT_GET_CLASS(IOThreadClass, obj, TYPE_IOTHREAD)
#define IOTHREAD_CLASS(klass) \
OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD)
#ifdef CONFIG_POSIX
/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
* 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
* workloads.
*/
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
#else
#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
#endif
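/*
 * Per-thread pointer to the IOThread this thread belongs to; set in
 * iothread_run(), NULL in threads that are not IOThreads.
 */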
static __thread IOThread *my_iothread;
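/*
 * Return the AioContext that belongs to the calling thread: the
 * IOThread's own context when called from inside an IOThread, otherwise
 * the global main-loop AioContext.
 */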
AioContext *qemu_get_current_aio_context(void)
{
return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
}
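/*
 * Thread function for each IOThread: register the thread with RCU,
 * publish the thread id and wake iothread_complete() via init_done_sem,
 * then keep servicing the AioContext (and the GLib main loop, when
 * run_gcontext is set) until iothread_stop_bh() clears
 * iothread->running.
 */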
static void *iothread_run(void *opaque)
{
IOThread *iothread = opaque;
rcu_register_thread();
my_iothread = iothread;
iothread->thread_id = qemu_get_thread_id();
qemu_sem_post(&iothread->init_done_sem);
/*
 * iothread->running is cleared by iothread_stop_bh() running in this
 * thread.  Polling a flag that is only set from iothread_stop() (such
 * as ->stopping) would be racy: aio_notify() can be skipped when
 * notify_me is 0, so this loop could then block in aio_poll() forever.
 * See commit "iothread: fix iothread_stop() race condition".
 */
while (iothread->running) {
aio_poll(iothread->ctx, true);
/*
 * We must check the running state again in case it was changed
 * during the previous aio_poll() (e.g. by iothread_stop_bh()).
 */
if (iothread->running && atomic_read(&iothread->run_gcontext)) {
g_main_context_push_thread_default(iothread->worker_context);
g_main_loop_run(iothread->main_loop);
g_main_context_pop_thread_default(iothread->worker_context);
}
}
rcu_unregister_thread();
return NULL;
}
/* Runs in iothread_run() thread */
static void iothread_stop_bh(void *opaque)
{
IOThread *iothread = opaque;
iothread->running = false; /* stop iothread_run() */
if (iothread->main_loop) {
g_main_loop_quit(iothread->main_loop);
}
}
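/*
 * Stop an IOThread from another thread (typically the main loop at
 * shutdown): schedule iothread_stop_bh() in the IOThread's AioContext
 * so that the running flag is cleared from within the thread itself,
 * then join it.  The stopping flag makes repeated calls a no-op.
 */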
void iothread_stop(IOThread *iothread)
{
if (!iothread->ctx || iothread->stopping) {
return;
}
iothread->stopping = true;
/* A oneshot BH, unlike aio_notify(), forces aio_poll() or g_main_loop_run() to return */
aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
qemu_thread_join(&iothread->thread);
}
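/*
 * QOM instance_init: set the polling defaults and mark the thread as
 * not yet started (thread_id == -1); init_done_sem is posted by
 * iothread_run() once the thread id is known.
 */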
static void iothread_instance_init(Object *obj)
{
IOThread *iothread = IOTHREAD(obj);
iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
iothread->thread_id = -1;
qemu_sem_init(&iothread->init_done_sem, 0);
/* By default, we don't run gcontext */
atomic_set(&iothread->run_gcontext, 0);
}
static void iothread_instance_finalize(Object *obj)
{
IOThread *iothread = IOTHREAD(obj);
iothread_stop(iothread);
/*
 * Before glib2 2.33.10, a glib2 bug means a GSource's context
 * pointer may not be cleared even though the context has already
 * been destroyed (while it should be).  Free the AioContext
 * earlier here to work around that glib bug.
 *
 * This comment can be removed once the minimum supported glib2
 * version is raised to 2.33.10.  Until then, free the GSources
 * first, before destroying any GMainContext.
 */
if (iothread->ctx) {
aio_context_unref(iothread->ctx);
iothread->ctx = NULL;
}
if (iothread->worker_context) {
g_main_context_unref(iothread->worker_context);
iothread->worker_context = NULL;
g_main_loop_unref(iothread->main_loop);
iothread->main_loop = NULL;
}
qemu_sem_destroy(&iothread->init_done_sem);
}
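/*
 * Create the IOThread's GMainContext and attach the AioContext's
 * GSource to it, so that both AIO handlers and GLib sources are
 * dispatched from the same thread.
 */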
static void iothread_init_gcontext(IOThread *iothread)
{
GSource *source;
iothread->worker_context = g_main_context_new();
source = aio_get_g_source(iothread_get_aio_context(iothread));
g_source_attach(source, iothread->worker_context);
g_source_unref(source);
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}
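/*
 * UserCreatable::complete handler: create the AioContext and the
 * GMainContext, apply the polling parameters, spawn the thread and wait
 * until it has published its thread id.
 */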
static void iothread_complete(UserCreatable *obj, Error **errp)
{
Error *local_error = NULL;
IOThread *iothread = IOTHREAD(obj);
char *name, *thread_name;
iothread->stopping = false;
iothread->running = true;
iothread->ctx = aio_context_new(&local_error);
if (!iothread->ctx) {
error_propagate(errp, local_error);
return;
}
/*
* Init one GMainContext for the iothread unconditionally, even if
* it's not used
*/
iothread_init_gcontext(iothread);
aio_context_set_poll_params(iothread->ctx,
iothread->poll_max_ns,
iothread->poll_grow,
iothread->poll_shrink,
&local_error);
if (local_error) {
error_propagate(errp, local_error);
aio_context_unref(iothread->ctx);
iothread->ctx = NULL;
return;
}
/* This assumes we are called from a thread with useful CPU affinity for us
* to inherit.
*/
name = object_get_canonical_path_component(OBJECT(obj));
thread_name = g_strdup_printf("IO %s", name);
qemu_thread_create(&iothread->thread, thread_name, iothread_run,
iothread, QEMU_THREAD_JOINABLE);
g_free(thread_name);
g_free(name);
/* Wait for initialization to complete */
while (iothread->thread_id == -1) {
qemu_sem_wait(&iothread->init_done_sem);
}
}
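/*
 * QOM property plumbing for the poll-* parameters: each property is
 * described by its name and the byte offset of the backing int64_t
 * field in the IOThread struct.
 */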
typedef struct {
const char *name;
ptrdiff_t offset; /* field's byte offset in IOThread struct */
} PollParamInfo;
static PollParamInfo poll_max_ns_info = {
"poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static PollParamInfo poll_grow_info = {
"poll-grow", offsetof(IOThread, poll_grow),
};
static PollParamInfo poll_shrink_info = {
"poll-shrink", offsetof(IOThread, poll_shrink),
};
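/* Getter shared by all three poll-* properties: visit the int64_t field
 * described by the PollParamInfo passed as opaque. */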
static void iothread_get_poll_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
PollParamInfo *info = opaque;
int64_t *field = (void *)iothread + info->offset;
visit_type_int64(v, name, field, errp);
}
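/* Setter shared by all three poll-* properties: reject negative values,
 * store the new value and, if the AioContext already exists, apply the
 * updated parameters right away. */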
static void iothread_set_poll_param(Object *obj, Visitor *v,
const char *name, void *opaque, Error **errp)
{
IOThread *iothread = IOTHREAD(obj);
PollParamInfo *info = opaque;
int64_t *field = (void *)iothread + info->offset;
Error *local_err = NULL;
int64_t value;
visit_type_int64(v, name, &value, &local_err);
if (local_err) {
goto out;
}
if (value < 0) {
error_setg(&local_err, "%s value must be in range [0, %"PRId64"]",
info->name, INT64_MAX);
goto out;
}
*field = value;
if (iothread->ctx) {
aio_context_set_poll_params(iothread->ctx,
iothread->poll_max_ns,
iothread->poll_grow,
iothread->poll_shrink,
&local_err);
}
out:
error_propagate(errp, local_err);
}
static void iothread_class_init(ObjectClass *klass, void *class_data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
ucc->complete = iothread_complete;
object_class_property_add(klass, "poll-max-ns", "int",
iothread_get_poll_param,
iothread_set_poll_param,
NULL, &poll_max_ns_info, &error_abort);
object_class_property_add(klass, "poll-grow", "int",
iothread_get_poll_param,
iothread_set_poll_param,
NULL, &poll_grow_info, &error_abort);
object_class_property_add(klass, "poll-shrink", "int",
iothread_get_poll_param,
iothread_set_poll_param,
NULL, &poll_shrink_info, &error_abort);
}
static const TypeInfo iothread_info = {
.name = TYPE_IOTHREAD,
.parent = TYPE_OBJECT,
.class_init = iothread_class_init,
.instance_size = sizeof(IOThread),
.instance_init = iothread_instance_init,
.instance_finalize = iothread_instance_finalize,
.interfaces = (InterfaceInfo[]) {
{TYPE_USER_CREATABLE},
{}
},
};
static void iothread_register_types(void)
{
type_register_static(&iothread_info);
}
type_init(iothread_register_types)
char *iothread_get_id(IOThread *iothread)
{
return object_get_canonical_path_component(OBJECT(iothread));
}
AioContext *iothread_get_aio_context(IOThread *iothread)
{
return iothread->ctx;
}
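/*
 * object_child_foreach() callback for qmp_query_iothreads(): append one
 * IOThreadInfo entry per IOThread object found under the objects root.
 */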
static int query_one_iothread(Object *object, void *opaque)
{
IOThreadInfoList ***prev = opaque;
IOThreadInfoList *elem;
IOThreadInfo *info;
IOThread *iothread;
iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
if (!iothread) {
return 0;
}
info = g_new0(IOThreadInfo, 1);
info->id = iothread_get_id(iothread);
info->thread_id = iothread->thread_id;
info->poll_max_ns = iothread->poll_max_ns;
info->poll_grow = iothread->poll_grow;
info->poll_shrink = iothread->poll_shrink;
elem = g_new0(IOThreadInfoList, 1);
elem->value = info;
elem->next = NULL;
**prev = elem;
*prev = &elem->next;
return 0;
}
IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
IOThreadInfoList *head = NULL;
IOThreadInfoList **prev = &head;
Object *container = object_get_objects_root();
object_child_foreach(container, query_one_iothread, &prev);
return head;
}
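/*
 * Return the IOThread's GMainContext and enable GLib dispatch for it:
 * setting run_gcontext and kicking aio_poll() with aio_notify() makes
 * iothread_run() enter g_main_loop_run() on its next iteration.
 *
 * Illustrative use (a caller that wants its GLib source serviced by
 * this IOThread; "source" here is hypothetical):
 *
 *     g_source_attach(source, iothread_get_g_main_context(iothread));
 */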
GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
atomic_set(&iothread->run_gcontext, 1);
aio_notify(iothread->ctx);
return iothread->worker_context;
}
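/*
 * Create an internal IOThread, parented under the internal object root
 * rather than /objects.  User-visible IOThreads are instead created
 * with "-object iothread,id=..." (or object-add) and can be listed with
 * the query-iothreads QMP command.
 */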
IOThread *iothread_create(const char *id, Error **errp)
{
Object *obj;
obj = object_new_with_props(TYPE_IOTHREAD,
object_get_internal_root(),
id, errp, NULL);
return IOTHREAD(obj);
}
void iothread_destroy(IOThread *iothread)
{
object_unparent(OBJECT(iothread));
}
/* Look up an IOThread by its id.  Only finds user-created objects, not
 * internal iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}