From: Eric Blake
Date: Thu, 13 Nov 2025 01:11:30 +0000 (-0600)
Subject: qio: Protect NetListener callback with mutex
X-Git-Tag: v10.2.0-rc1~9^2~10
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=9d86181874ab7b0e95ae988f6f80715943c618c6;p=thirdparty%2Fqemu.git

qio: Protect NetListener callback with mutex

Without a mutex, NetListener can run into the following data race
between a thread changing the async callback function to be used when a
client connects and the thread servicing polling of the listening
sockets:

Thread 1:
  qio_net_listener_set_client_func(lstnr, f1, ...);
  => foreach sock: socket
     => object_ref(lstnr)
     => sock_src = qio_channel_socket_add_watch_source(sock, ....,
                                                       lstnr, object_unref);

Thread 2:
  poll()
  => event POLLIN on socket
  => ref(GSourceCallback)
  => if (lstnr->io_func)            // while lstnr->io_func is f1
  ... interrupted ...

Thread 1:
  qio_net_listener_set_client_func(lstnr, f2, ...);
  => foreach sock: socket
     => g_source_unref(sock_src)
  => foreach sock: socket
     => object_ref(lstnr)
     => sock_src = qio_channel_socket_add_watch_source(sock, ....,
                                                       lstnr, object_unref);

Thread 2:
  => call lstnr->io_func(lstnr->io_data)    // now sees f2
  => return dispatch(sock)
  => unref(GSourceCallback)
     => destroy-notify
     => object_unref

Found by inspection; I did not spend the time trying to add sleeps or
run under gdb to actually trigger the race in practice.  This is a
SEGFAULT waiting to happen if f2 can become NULL, because thread 1
deregisters the user's callback while thread 2 is trying to service it.
Other messes are also theoretically possible, such as running callback
f1 with an opaque pointer that should only be passed to f2 (if the
client code were to use more than just a binary choice between a single
async function and NULL).
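As a concrete illustration, here is a minimal caller-side sketch of the
racy pattern.  The state pointers and on_client_* functions are invented
for illustration (they are not part of QEMU); only the
qio_net_listener_set_client_func() calls are the real API:

    static void *state_v1, *state_v2;   /* invented opaque data */

    static void on_client_v1(QIONetListener *lstnr, QIOChannelSocket *sioc,
                             gpointer opaque)
    {
        g_assert(opaque == state_v1);   /* could fail before this patch */
    }

    static void on_client_v2(QIONetListener *lstnr, QIOChannelSocket *sioc,
                             gpointer opaque)
    {
        g_assert(opaque == state_v2);   /* could fail before this patch */
    }

    /* Runs in a thread other than the one polling the listener: */
    static void retarget(QIONetListener *lstnr)
    {
        qio_net_listener_set_client_func(lstnr, on_client_v1, state_v1, NULL);
        qio_net_listener_set_client_func(lstnr, on_client_v2, state_v2, NULL);
        qio_net_listener_set_client_func(lstnr, NULL, NULL, NULL);
    }

If the polling thread dispatches a connection while retarget() runs, the
old unsynchronized reads could pair one callback with the other's opaque
data, or call through an io_func pointer that has just become NULL.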
Mitigating factor: if the code that modifies the QIONetListener can only
be reached by the same thread that executes the polling and the async
callbacks, then we are not in the two-thread race documented above (even
though poll can see two clients trying to connect in the same window of
time, any changes made to the listener by the first async callback are
completed before the thread moves on to the second client).  However,
QEMU is complex enough that this is hard to analyze generically.  If QMP
commands (like nbd-server-stop) are run in the main loop and the
listener uses the main loop, things should be okay.  But when a client
uses an alternative GMainContext, or if servicing a QMP command hands
off to a coroutine to avoid blocking, I am unable to state with
certainty whether a given net listener can be modified by a thread
different from the polling thread running the callbacks.  At any rate,
it is worth making the API robust.

To ensure that modifying a NetListener can be done safely from any
thread, add a mutex that guarantees atomic access to all members of a
listener object related to callbacks.  This problem has been present
since QIONetListener was introduced.

Note that this does NOT prevent a second round of the user's old async
callback being invoked with the old opaque data, even when the user has
already tried to change the async callback during the first async
callback; it only guarantees that there is no tearing (the eventual
io_func(io_data) call that does get made will correspond to a
combination that the user requested at some point in time, never to a
mixed combination that never existed in practice).

In other words, this patch maintains the status quo: a user's async
callback function already needs to be robust to parallel clients
landing in the same window of poll servicing, even when only one client
is desired, if that particular listener can be amended in a thread
other than the one doing the polling.

CC: qemu-stable@nongnu.org
Fixes: 53047392 ("io: introduce a network socket listener API", v2.12.0)
Signed-off-by: Eric Blake
Message-ID: <20251113011625.878876-20-eblake@redhat.com>
Reviewed-by: Daniel P. Berrangé
[eblake: minor commit message wording improvements]
Signed-off-by: Eric Blake
---
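The shape of the fix, paraphrased from qio_net_listener_channel_func in
the patch below: take a consistent snapshot of the callback state under
the lock, then invoke the user's callback outside the lock, so that a
callback which itself retargets the listener does not deadlock on the
same mutex:

    QIONetListenerClientFunc io_func;
    gpointer io_data;

    /* Snapshot func/data atomically, so they always form a pair that
     * some caller actually requested. */
    WITH_QEMU_LOCK_GUARD(&listener->lock) {
        io_func = listener->io_func;
        io_data = listener->io_data;
    }
    /* Invoke outside the lock; the callback may safely call
     * qio_net_listener_set_client_func() itself. */
    if (io_func) {
        io_func(listener, sioc, io_data);
    }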
diff --git a/include/io/net-listener.h b/include/io/net-listener.h
index 42fbfab546..c2165dc166 100644
--- a/include/io/net-listener.h
+++ b/include/io/net-listener.h
@@ -54,6 +54,7 @@ struct QIONetListener {
 
     bool connected;
 
+    QemuMutex lock; /* Protects remaining fields */
     QIONetListenerClientFunc io_func;
     gpointer io_data;
     GDestroyNotify io_notify;
diff --git a/io/net-listener.c b/io/net-listener.c
index 0f16b78fbb..f70acdfc5c 100644
--- a/io/net-listener.c
+++ b/io/net-listener.c
@@ -23,11 +23,16 @@
 #include "io/dns-resolver.h"
 #include "qapi/error.h"
 #include "qemu/module.h"
+#include "qemu/lockable.h"
 #include "trace.h"
 
 QIONetListener *qio_net_listener_new(void)
 {
-    return QIO_NET_LISTENER(object_new(TYPE_QIO_NET_LISTENER));
+    QIONetListener *listener;
+
+    listener = QIO_NET_LISTENER(object_new(TYPE_QIO_NET_LISTENER));
+    qemu_mutex_init(&listener->lock);
+    return listener;
 }
 
 void qio_net_listener_set_name(QIONetListener *listener,
@@ -44,6 +49,9 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc,
 {
     QIONetListener *listener = QIO_NET_LISTENER(opaque);
     QIOChannelSocket *sioc;
+    QIONetListenerClientFunc io_func;
+    gpointer io_data;
+    GMainContext *context;
 
     sioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc),
                                      NULL);
@@ -51,10 +59,15 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc,
         return TRUE;
     }
 
-    trace_qio_net_listener_callback(listener, listener->io_func,
-                                    listener->context);
-    if (listener->io_func) {
-        listener->io_func(listener, sioc, listener->io_data);
+    WITH_QEMU_LOCK_GUARD(&listener->lock) {
+        io_func = listener->io_func;
+        io_data = listener->io_data;
+        context = listener->context;
+    }
+
+    trace_qio_net_listener_callback(listener, io_func, context);
+    if (io_func) {
+        io_func(listener, sioc, io_data);
     }
 
     object_unref(OBJECT(sioc));
@@ -111,6 +124,9 @@ int qio_net_listener_open_sync(QIONetListener *listener,
 void qio_net_listener_add(QIONetListener *listener,
                           QIOChannelSocket *sioc)
 {
+    QIONetListenerClientFunc io_func;
+    GMainContext *context;
+
     if (listener->name) {
         qio_channel_set_name(QIO_CHANNEL(sioc), listener->name);
     }
@@ -126,14 +142,18 @@ void qio_net_listener_add(QIONetListener *listener,
     object_ref(OBJECT(sioc));
     listener->connected = true;
 
-    trace_qio_net_listener_watch(listener, listener->io_func,
-                                 listener->context, "add");
-    if (listener->io_func != NULL) {
+    WITH_QEMU_LOCK_GUARD(&listener->lock) {
+        io_func = listener->io_func;
+        context = listener->context;
+    }
+
+    trace_qio_net_listener_watch(listener, io_func, context, "add");
+    if (io_func) {
         object_ref(OBJECT(listener));
         listener->io_source[listener->nsioc] = qio_channel_add_watch_source(
             QIO_CHANNEL(listener->sioc[listener->nsioc]),
             G_IO_IN, qio_net_listener_channel_func,
-            listener, (GDestroyNotify)object_unref, listener->context);
+            listener, (GDestroyNotify)object_unref, context);
     }
 
     listener->nsioc++;
@@ -148,6 +168,7 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener,
 {
     size_t i;
+    QEMU_LOCK_GUARD(&listener->lock);
 
     trace_qio_net_listener_unwatch(listener, listener->io_func,
                                    listener->context, "set_client_func");
@@ -228,9 +249,15 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener)
         .loop = loop
     };
     size_t i;
+    QIONetListenerClientFunc io_func;
+    GMainContext *context;
 
-    trace_qio_net_listener_unwatch(listener, listener->io_func,
-                                   listener->context, "wait_client");
+    WITH_QEMU_LOCK_GUARD(&listener->lock) {
+        io_func = listener->io_func;
+        context = listener->context;
+    }
+
+    trace_qio_net_listener_unwatch(listener, io_func, context, "wait_client");
     for (i = 0; i < listener->nsioc; i++) {
         if (listener->io_source[i]) {
             g_source_destroy(listener->io_source[i]);
@@ -260,15 +287,14 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener)
     g_main_loop_unref(loop);
     g_main_context_unref(ctxt);
 
-    trace_qio_net_listener_watch(listener, listener->io_func,
-                                 listener->context, "wait_client");
-    if (listener->io_func != NULL) {
+    trace_qio_net_listener_watch(listener, io_func, context, "wait_client");
+    if (io_func != NULL) {
         for (i = 0; i < listener->nsioc; i++) {
             object_ref(OBJECT(listener));
             listener->io_source[i] = qio_channel_add_watch_source(
                 QIO_CHANNEL(listener->sioc[i]), G_IO_IN,
                 qio_net_listener_channel_func,
-                listener, (GDestroyNotify)object_unref, listener->context);
+                listener, (GDestroyNotify)object_unref, context);
         }
     }
 
@@ -283,6 +309,7 @@ void qio_net_listener_disconnect(QIONetListener *listener)
         return;
     }
 
+    QEMU_LOCK_GUARD(&listener->lock);
     trace_qio_net_listener_unwatch(listener, listener->io_func,
                                    listener->context, "disconnect");
     for (i = 0; i < listener->nsioc; i++) {
@@ -318,6 +345,7 @@ static void qio_net_listener_finalize(Object *obj)
     g_free(listener->io_source);
     g_free(listener->sioc);
     g_free(listener->name);
+    qemu_mutex_destroy(&listener->lock);
 }
 
 static const TypeInfo qio_net_listener_info = {
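For reference, both guard macros used above come from "qemu/lockable.h".
A minimal sketch of their scoping semantics; the counter example is
invented:

    static QemuMutex counter_lock;
    static int counter;

    static void bump_scoped(void)
    {
        QEMU_LOCK_GUARD(&counter_lock);        /* held until return */
        counter++;
    }

    static void bump_block(void)
    {
        WITH_QEMU_LOCK_GUARD(&counter_lock) {  /* held only in this block */
            counter++;
        }
        /* counter_lock has already been released here */
    }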