Merge tag 'pull-loongarch-20241016' of https://gitlab.com/gaosong/qemu into staging
[qemu/armbru.git] / util / fdmon-poll.c
blob17df917cf962a9f73000c970ec616f229c4e4f9a
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * poll(2) file descriptor monitoring
5 * Uses ppoll(2) when available, g_poll() otherwise.
6 */
8 #include "qemu/osdep.h"
9 #include "aio-posix.h"
10 #include "qemu/rcu_queue.h"
/*
 * These thread-local variables are used only in fdmon_poll_wait() around the
 * call to the poll() system call. In particular they are not used while
 * aio_poll is performing callbacks, which makes it much easier to think about
 * reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll(). And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext. Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;       /* fd/events array handed to poll() */
static __thread AioHandler **nodes;     /* nodes[i] is the handler for pollfds[i] */
static __thread unsigned npfd, nalloc;  /* entries in use / allocated capacity */
static __thread Notifier pollfds_cleanup_notifier; /* frees the arrays at thread exit */
29 static void pollfds_cleanup(Notifier *n, void *unused)
31 g_assert(npfd == 0);
32 g_free(pollfds);
33 g_free(nodes);
34 nalloc = 0;
37 static void add_pollfd(AioHandler *node)
39 if (npfd == nalloc) {
40 if (nalloc == 0) {
41 pollfds_cleanup_notifier.notify = pollfds_cleanup;
42 qemu_thread_atexit_add(&pollfds_cleanup_notifier);
43 nalloc = 8;
44 } else {
45 g_assert(nalloc <= INT_MAX);
46 nalloc *= 2;
48 pollfds = g_renew(GPollFD, pollfds, nalloc);
49 nodes = g_renew(AioHandler *, nodes, nalloc);
51 nodes[npfd] = node;
52 pollfds[npfd] = (GPollFD) {
53 .fd = node->pfd.fd,
54 .events = node->pfd.events,
56 npfd++;
/*
 * Block for up to @timeout ns waiting for fd activity on @ctx's handlers.
 *
 * Builds the thread-local GPollFD array from ctx->aio_handlers, calls
 * qemu_poll_ns(), and appends every handler with pending revents to
 * @ready_list.  May transparently upgrade the context to the epoll
 * backend and delegate to it when the fd count makes that worthwhile.
 *
 * Returns the qemu_poll_ns() result: number of ready fds, 0 on timeout,
 * negative on error.
 */
static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
                           int64_t timeout)
{
    AioHandler *node;
    int ret;

    /* The arrays must not be in use by a concurrent/nested poll */
    assert(npfd == 0);

    /* RCU read: deleted nodes may still be linked, so skip them explicitly */
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events) {
            add_pollfd(node);
        }
    }

    /* epoll(7) is faster above a certain number of fds */
    if (fdmon_epoll_try_upgrade(ctx, npfd)) {
        npfd = 0; /* we won't need pollfds[], reset npfd */
        /* ctx->fdmon_ops now points at the epoll backend; let it wait */
        return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
    }

    ret = qemu_poll_ns(pollfds, npfd, timeout);
    if (ret > 0) {
        int i;

        for (i = 0; i < npfd; i++) {
            int revents = pollfds[i].revents;

            if (revents) {
                aio_add_ready_handler(ready_list, nodes[i], revents);
            }
        }
    }

    /* Mark the arrays free for reuse before callbacks run */
    npfd = 0;
    return ret;
}
96 static void fdmon_poll_update(AioContext *ctx,
97 AioHandler *old_node,
98 AioHandler *new_node)
100 /* Do nothing, AioHandler already contains the state we'll need */
/*
 * The poll(2)-based monitoring backend.  This is the fallback/default
 * implementation; fdmon_poll_wait() may upgrade a context to epoll.
 */
const FDMonOps fdmon_poll_ops = {
    .update = fdmon_poll_update,
    .wait = fdmon_poll_wait,
    .need_wait = aio_poll_disabled,