usb: rewrite fw path, fix numbering
[qemu/mdroth.git] / async.c
blob57ac3a818032811ecccf2e3adbec63b4afc71596
1 /*
2 * QEMU System Emulator
4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "qemu-common.h"
26 #include "qemu-aio.h"
29 * An AsyncContext protects the callbacks of AIO requests and Bottom Halves
30 * against interfering with each other. A typical example is qcow2 that accepts
31 * asynchronous requests, but relies for manipulation of its metadata on
32 * synchronous bdrv_read/write that doesn't trigger any callbacks.
34 * However, these functions are often emulated using AIO which means that AIO
35 * callbacks must be run - but at the same time we must not run callbacks of
36 * other requests as they might start to modify metadata and corrupt the
37 * internal state of the caller of bdrv_read/write.
39 * To achieve the desired semantics we switch into a new AsyncContext.
40 * Callbacks must only be run if they belong to the current AsyncContext.
41 * Otherwise they need to be queued until their own context is active again.
42 * This is how you can make qemu_aio_wait() wait only for your own callbacks.
 * The AsyncContexts form a stack. When you leave an AsyncContext, you always
45 * return to the old ("parent") context.
/*
 * One element of the context stack.  Callbacks (AIO completions and Bottom
 * Halves) are only run while the context that queued them is active.
 */
struct AsyncContext {
    /* Consecutive number of the AsyncContext (position in the stack) */
    int id;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* Link to parent context (NULL only for the bottom-most context) */
    struct AsyncContext *parent;
};
/* The currently active AsyncContext (top of the context stack).  The
 * initial bottom element is a statically allocated, zero-initialized
 * compound literal: id 0, no parent, empty BH list.  It is never popped
 * (async_context_pop() aborts if it would be). */
static struct AsyncContext *async_context = &(struct AsyncContext) { 0 };
62 * Enter a new AsyncContext. Already scheduled Bottom Halves and AIO callbacks
63 * won't be called until this context is left again.
65 void async_context_push(void)
67 struct AsyncContext *new = qemu_mallocz(sizeof(*new));
68 new->parent = async_context;
69 new->id = async_context->id + 1;
70 async_context = new;
73 /* Run queued AIO completions and destroy Bottom Half */
74 static void bh_run_aio_completions(void *opaque)
76 QEMUBH **bh = opaque;
77 qemu_bh_delete(*bh);
78 qemu_free(bh);
79 qemu_aio_process_queue();
82 * Leave the currently active AsyncContext. All Bottom Halves belonging to the
83 * old context are executed before changing the context.
85 void async_context_pop(void)
87 struct AsyncContext *old = async_context;
88 QEMUBH **bh;
90 /* Flush the bottom halves, we don't want to lose them */
91 while (qemu_bh_poll());
93 /* Switch back to the parent context */
94 async_context = async_context->parent;
95 qemu_free(old);
97 if (async_context == NULL) {
98 abort();
101 /* Schedule BH to run any queued AIO completions as soon as possible */
102 bh = qemu_malloc(sizeof(*bh));
103 *bh = qemu_bh_new(bh_run_aio_completions, bh);
104 qemu_bh_schedule(*bh);
/*
 * Returns the ID of the currently active AsyncContext, i.e. its depth on
 * the context stack (0 for the initial bottom-most context).
 */
int get_async_context_id(void)
{
    return async_context->id;
}
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    QEMUBHFunc *cb;   /* callback invoked when the BH runs */
    void *opaque;     /* opaque argument passed through to cb */
    int scheduled;    /* non-zero while queued for execution */
    int idle;         /* idle BHs are only polled roughly every 10ms */
    int deleted;      /* marked for removal; freed by the sweep in qemu_bh_poll() */
    QEMUBH *next;     /* singly-linked list anchored at AsyncContext.first_bh */
};
127 QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
129 QEMUBH *bh;
130 bh = qemu_mallocz(sizeof(QEMUBH));
131 bh->cb = cb;
132 bh->opaque = opaque;
133 bh->next = async_context->first_bh;
134 async_context->first_bh = bh;
135 return bh;
138 int qemu_bh_poll(void)
140 QEMUBH *bh, **bhp;
141 int ret;
143 ret = 0;
144 for (bh = async_context->first_bh; bh; bh = bh->next) {
145 if (!bh->deleted && bh->scheduled) {
146 bh->scheduled = 0;
147 if (!bh->idle)
148 ret = 1;
149 bh->idle = 0;
150 bh->cb(bh->opaque);
154 /* remove deleted bhs */
155 bhp = &async_context->first_bh;
156 while (*bhp) {
157 bh = *bhp;
158 if (bh->deleted) {
159 *bhp = bh->next;
160 qemu_free(bh);
161 } else
162 bhp = &bh->next;
165 return ret;
168 void qemu_bh_schedule_idle(QEMUBH *bh)
170 if (bh->scheduled)
171 return;
172 bh->scheduled = 1;
173 bh->idle = 1;
176 void qemu_bh_schedule(QEMUBH *bh)
178 if (bh->scheduled)
179 return;
180 bh->scheduled = 1;
181 bh->idle = 0;
182 /* stop the currently executing CPU to execute the BH ASAP */
183 qemu_notify_event();
/* Cancel a pending BH.  Only the scheduled flag is cleared; the BH object
 * stays valid and may be scheduled again later. */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}
/* Cancel and destroy a BH.  The object is not freed here: it is only
 * marked deleted and reclaimed by the sweep pass in qemu_bh_poll(). */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
197 void qemu_bh_update_timeout(int *timeout)
199 QEMUBH *bh;
201 for (bh = async_context->first_bh; bh; bh = bh->next) {
202 if (!bh->deleted && bh->scheduled) {
203 if (bh->idle) {
204 /* idle bottom halves will be polled at least
205 * every 10ms */
206 *timeout = MIN(10, *timeout);
207 } else {
208 /* non-idle bottom halves will be executed
209 * immediately */
210 *timeout = 0;
211 break;