/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#include <pulse/xmalloc.h>

#include <pulsecore/atomic.h>
#include <pulsecore/log.h>
#include <pulsecore/thread.h>
#include <pulsecore/macro.h>
#include <pulsecore/core-util.h>
#include <pulsecore/llist.h>
#include <pulsecore/flist.h>
#include <pulsecore/fdsem.h>

#include "asyncq.h"

#define ASYNCQ_SIZE 256
/* For debugging purposes we can define _Y to put an extra thread
 * yield between each operation. */

#ifdef PROFILE
#define _Y pa_thread_yield()
#else
#define _Y do { } while(0)
#endif
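
/* A lock-free FIFO for exactly one reader and one writer thread: the
 * ring of cells that follows the pa_asyncq struct is filled with
 * atomic compare-and-swap by the writer and cleared by the reader,
 * while the two fdsems are only used for sleeping and poll()
 * integration. Items that cannot be pushed right away by
 * pa_asyncq_post() are kept in the local "localq" list and flushed
 * into the ring later. */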
struct localq {
    void *data;
    PA_LLIST_FIELDS(struct localq);
};

struct pa_asyncq {
    unsigned size;
    unsigned read_idx;
    unsigned write_idx;
    pa_fdsem *read_fdsem, *write_fdsem;

    PA_LLIST_HEAD(struct localq, localq);
    struct localq *last_localq;
    pa_bool_t waiting_for_post;
};

PA_STATIC_FLIST_DECLARE(localq, 0, pa_xfree);
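
/* The array of atomic cells lives in the same allocation, directly
 * after the (aligned) pa_asyncq structure; this macro returns a
 * pointer to its first element. */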
#define PA_ASYNCQ_CELLS(x) ((pa_atomic_ptr_t*) ((uint8_t*) (x) + PA_ALIGN(sizeof(struct pa_asyncq))))
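
/* Map the monotonically increasing read/write indices onto a slot in
 * the cell array. This relies on the queue size being a power of two. */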
static unsigned reduce(pa_asyncq *l, unsigned value) {
    return value & (unsigned) (l->size - 1);
}
pa_asyncq *pa_asyncq_new(unsigned size) {
    pa_asyncq *l;

    if (!size)
        size = ASYNCQ_SIZE;

    pa_assert(pa_is_power_of_two(size));

    l = pa_xmalloc0(PA_ALIGN(sizeof(pa_asyncq)) + (sizeof(pa_atomic_ptr_t) * size));

    l->size = size;

    PA_LLIST_HEAD_INIT(struct localq, l->localq);
    l->last_localq = NULL;
    l->waiting_for_post = FALSE;

    if (!(l->read_fdsem = pa_fdsem_new())) {
        pa_xfree(l);
        return NULL;
    }

    if (!(l->write_fdsem = pa_fdsem_new())) {
        pa_fdsem_free(l->read_fdsem);
        pa_xfree(l);
        return NULL;
    }

    return l;
}
void pa_asyncq_free(pa_asyncq *l, pa_free_cb_t free_cb) {
    struct localq *q;

    pa_assert(l);

    if (free_cb) {
        void *p;

        while ((p = pa_asyncq_pop(l, 0)))
            free_cb(p);
    }

    while ((q = l->localq)) {

        if (free_cb)
            free_cb(q->data);

        PA_LLIST_REMOVE(struct localq, l->localq, q);

        if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
            pa_xfree(q);
    }

    pa_fdsem_free(l->read_fdsem);
    pa_fdsem_free(l->write_fdsem);
    pa_xfree(l);
}
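
/* Writer side: try to install the item in the cell the write index
 * points at. If the reader hasn't emptied that cell yet the queue is
 * full; either fail (wait_op == FALSE) or sleep on read_fdsem until
 * the reader frees the cell. A successful push signals write_fdsem. */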
static int push(pa_asyncq *l, void *p, pa_bool_t wait_op) {
    unsigned idx;
    pa_atomic_ptr_t *cells;

    pa_assert(l);
    pa_assert(p);

    cells = PA_ASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->write_idx);

    if (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p)) {

        if (!wait_op)
            return -1;

        /* pa_log("sleeping on push"); */

        do {
            pa_fdsem_wait(l->read_fdsem);
        } while (!pa_atomic_ptr_cmpxchg(&cells[idx], NULL, p));
    }

    _Y;
    l->write_idx++;

    pa_fdsem_post(l->write_fdsem);

    return 0;
}
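
/* Try to move items that were queued locally by pa_asyncq_post() back
 * into the ring, oldest first. Returns TRUE when the local queue is
 * empty, FALSE if the ring is still full. */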
static pa_bool_t flush_postq(pa_asyncq *l, pa_bool_t wait_op) {
    struct localq *q;

    pa_assert(l);

    while ((q = l->last_localq)) {

        if (push(l, q->data, wait_op) < 0)
            return FALSE;

        l->last_localq = q->prev;

        PA_LLIST_REMOVE(struct localq, l->localq, q);

        if (pa_flist_push(PA_STATIC_FLIST_GET(localq), q) < 0)
            pa_xfree(q);
    }

    return TRUE;
}
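
/* Blocking/non-blocking push for the writer thread. Locally queued
 * items are flushed first so that ordering is preserved. */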
int pa_asyncq_push(pa_asyncq *l, void *p, pa_bool_t wait_op) {
    pa_assert(l);

    if (!flush_postq(l, wait_op))
        return -1;

    return push(l, p, wait_op);
}
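
/* Like pa_asyncq_push(), but never blocks and never fails: if the
 * ring is full the item is added to the local overflow list instead. */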
void pa_asyncq_post(pa_asyncq *l, void *p) {
    struct localq *q;

    pa_assert(l);
    pa_assert(p);

    if (flush_postq(l, FALSE))
        if (pa_asyncq_push(l, p, FALSE) >= 0)
            return;

    /* OK, we couldn't push anything in the queue. So let's queue it
     * locally and push it later */

    if (pa_log_ratelimit(PA_LOG_WARN))
        pa_log_warn("q overrun, queuing locally");

    if (!(q = pa_flist_pop(PA_STATIC_FLIST_GET(localq))))
        q = pa_xnew(struct localq, 1);

    q->data = p;
    PA_LLIST_PREPEND(struct localq, l->localq, q);

    if (!l->last_localq)
        l->last_localq = q;
}
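
/* Reader side: take the item the read index points at, or optionally
 * sleep on write_fdsem until the writer fills that cell. Clearing the
 * cell afterwards signals read_fdsem so a blocked writer can proceed. */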
void* pa_asyncq_pop(pa_asyncq *l, pa_bool_t wait_op) {
    unsigned idx;
    void *ret;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_ASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->read_idx);

    if (!(ret = pa_atomic_ptr_load(&cells[idx]))) {

        if (!wait_op)
            return NULL;

        /* pa_log("sleeping on pop"); */

        do {
            pa_fdsem_wait(l->write_fdsem);
        } while (!(ret = pa_atomic_ptr_load(&cells[idx])));
    }

    pa_assert(ret);

    /* Guaranteed to succeed if we only have a single reader */
    pa_assert_se(pa_atomic_ptr_cmpxchg(&cells[idx], ret, NULL));

    _Y;
    l->read_idx++;

    pa_fdsem_post(l->read_fdsem);

    return ret;
}
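
/* File descriptors and before/after-poll hooks so that either side of
 * the queue can be integrated into a poll()/select() based main loop. */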
int pa_asyncq_read_fd(pa_asyncq *q) {
    pa_assert(q);

    return pa_fdsem_get(q->write_fdsem);
}
int pa_asyncq_read_before_poll(pa_asyncq *l) {
    unsigned idx;
    pa_atomic_ptr_t *cells;

    pa_assert(l);

    cells = PA_ASYNCQ_CELLS(l);

    _Y;
    idx = reduce(l, l->read_idx);

    for (;;) {
        if (pa_atomic_ptr_load(&cells[idx]))
            return -1;

        if (pa_fdsem_before_poll(l->write_fdsem) >= 0)
            return 0;
    }
}
void pa_asyncq_read_after_poll(pa_asyncq *l) {
    pa_assert(l);

    pa_fdsem_after_poll(l->write_fdsem);
}
int pa_asyncq_write_fd(pa_asyncq *q) {
    pa_assert(q);

    return pa_fdsem_get(q->read_fdsem);
}
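
/* Before polling, the writer first tries to flush its local overflow
 * queue; only if the ring is still full does it arm read_fdsem and
 * remember that it is waiting for the reader to make room. */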
void pa_asyncq_write_before_poll(pa_asyncq *l) {
    pa_assert(l);

    for (;;) {

        if (flush_postq(l, FALSE))
            break;

        if (pa_fdsem_before_poll(l->read_fdsem) >= 0) {
            l->waiting_for_post = TRUE;
            break;
        }
    }
}
void pa_asyncq_write_after_poll(pa_asyncq *l) {
    pa_assert(l);

    if (l->waiting_for_post) {
        pa_fdsem_after_poll(l->read_fdsem);
        l->waiting_for_post = FALSE;
    }
}