/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

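/*
 * One registered event source.  A handler is driven either by an
 * EventNotifier (io_notify) or by a socket with io_read/io_write
 * callbacks; see aio_set_event_notifier() and aio_set_fd_handler().
 */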
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

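/*
 * Unregister a handler.  If the handler list is currently being walked
 * (the list_lock count is non-zero), the node is only marked as deleted;
 * it is freed later by aio_dispatch_handlers().
 */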
static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /*
     * If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If aio_poll is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
    } else {
        /* Otherwise, delete it for real.  We can't just mark it as
         * deleted because deleted nodes are only cleaned up after
         * releasing the list_lock.
         */
        QLIST_REMOVE(node, node);
        g_free(node);
    }
}

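/*
 * Register, replace or remove the read/write handlers for a socket.
 * The fd is a SOCKET; readiness is tracked with WSAEventSelect() against
 * the AioContext's event notifier.  Passing NULL for both io_read and
 * io_write removes any existing handler for the fd; when replacing, the
 * new node is inserted before the old one is removed.
 */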
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *old_node;
    AioHandler *node = NULL;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
        if (old_node->pfd.fd == fd && !old_node->deleted) {
            break;
        }
    }

    if (io_read || io_write) {
        HANDLE event;
        long bitmask = 0;

        /* Alloc and insert if it's not already there */
        node = g_new0(AioHandler, 1);
        node->pfd.fd = fd;

        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }
    if (old_node) {
        aio_remove_fd_handler(ctx, old_node);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

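/*
 * Register or remove an EventNotifier handler.  A NULL io_notify removes
 * the handler for @e; otherwise the node is created on first use and its
 * callback is updated on later calls.
 */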
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            aio_remove_fd_handler(ctx, node);
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

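/*
 * Check the registered sockets with a zero-timeout select() and record any
 * pending G_IO_IN/G_IO_OUT events in pfd.revents.  Returns true if at least
 * one socket handler already has work to do.
 */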
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

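/* Return true if at least one handler has events waiting to be dispatched. */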
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

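/*
 * Run the callback of the handler whose event handle matches @event, plus
 * any socket callbacks whose revents were filled in by aio_prepare().
 * Deleted nodes are freed here when the list_lock can be taken.  Returns
 * true if real progress was made; a wakeup of ctx->notifier alone does
 * not count.
 */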
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

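/*
 * Dispatch without waiting: run bottom halves, handlers with pending
 * revents, and expired timers.
 */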
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}

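/*
 * Wait for events and dispatch them.  The first WaitForMultipleObjects()
 * call may block; further iterations use a zero timeout until every
 * signaled event handle and every select() revent has been handled.
 */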
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS];
    bool progress, have_select_revents, first;
    unsigned count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            assert(count < MAXIMUM_WAIT_OBJECTS);
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            qatomic_store_release(&ctx->notify_me,
                                  qatomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

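/* The per-context setup, teardown and GSource hooks are no-ops on Windows. */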
void aio_context_setup(AioContext *ctx)
{
}

void aio_context_destroy(AioContext *ctx)
{
}

void aio_context_use_g_source(AioContext *ctx)
{
}

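/* AioContext polling is not implemented on Windows; any non-zero max_ns is rejected. */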
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    if (max_ns) {
        error_setg(errp, "AioContext polling is not implemented on Windows");
    }
}

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
}