/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
#include "hw/qdev-core.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
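
/*
 * Example (illustrative sketch, not a declaration in this header): a driver
 * defines its own AIOCB subtype with BlockAIOCB embedded first, then lets
 * qemu_aio_get() allocate and initialize the common fields.  The MyAIOCB and
 * my_aiocb_info names below are hypothetical.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;          // must be the first member
 *         int my_state;               // driver-specific state
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     ...
 *     acb->common.cb(acb->common.opaque, 0);  // report success to the caller
 *     qemu_aio_unref(acb);                    // drop the initial reference
 */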

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

typedef struct LuringState LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * must be called.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;
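
/*
 * Example (illustrative): a monitoring implementation that cannot check fd
 * readiness from userspace would be wired up like this; the fdmon_example_*
 * names are hypothetical.
 *
 *     static const FDMonOps fdmon_example_ops = {
 *         .update    = fdmon_example_update,
 *         .wait      = fdmon_example_wait,
 *         .need_wait = aio_poll_disabled,   // cannot poll fds from userspace
 *     };
 */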

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;
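
/*
 * Illustrative consequence of the slicing above (second_cb is hypothetical):
 * a BH scheduled while aio_bh_poll() runs lands on the new bh_list, so it is
 * only invoked by the next aio_bh_poll() call, not by the current slice.
 *
 *     static void first_cb(void *opaque)
 *     {
 *         AioContext *ctx = opaque;
 *         aio_bh_schedule_oneshot(ctx, second_cb, NULL);  // runs next poll
 *     }
 */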

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid performing additions and removal
     * of nodes and edges from block graph while some
     * other thread is traversing it.
     */
    BdrvGraphRWlock *bdrv_graph;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock to protect between QEMUBH and AioHandler adders and deleter,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContext provides a mini event-loop that can be waited on synchronously.
 * It also provides bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))
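
/*
 * Example (illustrative): run a function once in ctx's home thread; my_cb and
 * the MyState type are hypothetical.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;   // executes in ctx's event loop thread
 *         ...
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, s);   // no QEMUBH to free later
 */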

/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 * @reentrancy_guard: A guard set when entering a cb to prevent
 * device-reentrancy issues
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)

/**
 * aio_bh_new_guarded: Allocate a new bottom half structure with a
 * reentrancy_guard
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)
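
/*
 * Example (illustrative) of the full lifecycle of a reusable bottom half;
 * my_cb and s are hypothetical.
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);
 *     qemu_bh_schedule(bh);       // my_cb(s) runs in ctx's thread
 *     ...
 *     qemu_bh_delete(bh);         // cancel if still pending, then free
 */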

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);
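
/*
 * Example (illustrative): another thread updates state that the event loop
 * must re-examine, then kicks a possibly sleeping aio_poll(); the
 * please_shutdown field is hypothetical.
 *
 *     qatomic_set(&s->please_shutdown, true);
 *     aio_notify(ctx);    // forces re-evaluation of fds, BHs and timers
 */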

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently with itself.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed once any
 * in-flight execution has finished.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * aio as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool no_coroutine_fn aio_poll(AioContext *ctx, bool blocking);
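
/*
 * Example (illustrative): the common pattern of waiting synchronously for a
 * completion flag that some callback will set; 'done' is hypothetical.
 *
 *     while (!done) {
 *         aio_poll(ctx, true);   // block until at least one event fires
 *     }
 */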

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);
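
/*
 * Example (illustrative): watch a socket for readability only; my_read_cb,
 * sockfd and s are hypothetical, and the NULL arguments leave the write and
 * poll callbacks unset.
 *
 *     aio_set_fd_handler(ctx, sockfd, my_read_cb, NULL, NULL, NULL, s);
 *
 * Passing all-NULL callbacks for a registered fd removes the handler again.
 */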

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);
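
/*
 * Example (illustrative): wire an EventNotifier to a callback without
 * userspace polling support; my_notifier_cb is hypothetical.
 *
 *     aio_set_event_notifier(ctx, &s->notifier, my_notifier_cb, NULL, NULL);
 */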

/*
 * Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 *
 * Note that if the io_poll_end() callback (or the entire notifier) is removed
 * during polling, it will not be called, so an io_poll_begin() is not
 * necessarily always followed by an io_poll_end().
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
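
/*
 * Example (illustrative): allocate a millisecond-scale timer and arm it
 * 100 ms from now; my_timer_cb and s are hypothetical.
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, s);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */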

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}
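
/*
 * Example (illustrative): for caller-allocated storage, such as a timer
 * embedded in a device state struct, use the init variant instead:
 *
 *     aio_timer_init(ctx, &s->timer, QEMU_CLOCK_VIRTUAL, SCALE_NS,
 *                    my_timer_cb, s);
 */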

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);
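
/*
 * Example (illustrative): hand a yielded coroutine over to an IOThread's
 * context; iothread_ctx is hypothetical.
 *
 *     aio_co_schedule(iothread_ctx, co);   // co resumes in iothread_ctx
 */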

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 *
 * Note that this function cannot reschedule from iohandler_ctx to
 * qemu_aio_context.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);
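
/*
 * Example (illustrative): the usual yield/wake pairing.  The coroutine
 * yields while an operation is in flight, and the completion path wakes it
 * on its last AioContext:
 *
 *     // in the coroutine:
 *     qemu_coroutine_yield();
 *
 *     // in the completion callback (any thread):
 *     aio_co_wake(co);
 */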

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * qemu_get_current_aio_context:
 *
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 *
 * Note that this function never returns the main loop's iohandler_ctx;
 * the main loop AioContext is returned instead.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
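
/*
 * Example (illustrative): allow up to 32 microseconds of busy polling with
 * doubling growth and shrink factors; the values here are arbitrary.
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 */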

/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);

#endif /* QEMU_AIO_H */