/*
 * sys/sys/queue.h — imported from NetBSD via minix.git
 * (commit "SYSENTER/SYSCALL support",
 *  blob 368830b4e6aa03238cd80ff12d0cf320d05794bd)
 */
/*	$NetBSD: queue.h,v 1.53 2011/11/19 22:51:31 tls Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */
34 #ifndef _SYS_QUEUE_H_
35 #define _SYS_QUEUE_H_
37 #include <sys/null.h>
/*
 * This file defines five types of data structures: singly-linked lists,
 * lists, simple queues, tail queues, and circular queues.
 *
 * A singly-linked list is headed by a single forward pointer. The
 * elements are singly linked for minimum space and pointer manipulation
 * overhead at the expense of O(n) removal for arbitrary elements. New
 * elements can be added to the list after an existing element or at the
 * head of the list.  Elements being removed from the head of the list
 * should use the explicit macro for this purpose for optimum
 * efficiency. A singly-linked list may only be traversed in the forward
 * direction.  Singly-linked lists are ideal for applications with large
 * datasets and few or no removals or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before
 * or after an existing element or at the head of the list. A list
 * may only be traversed in the forward direction.
 *
 * A simple queue is headed by a pair of pointers, one the head of the
 * list and the other to the tail of the list. The elements are singly
 * linked to save space, so elements can only be removed from the
 * head of the list. New elements can be added to the list after
 * an existing element, at the head of the list, or at the end of the
 * list. A simple queue may only be traversed in the forward direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list. A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has a more
 * complex end of list detection.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 */
/*
 * List definitions.
 *
 * Doubly-linked via le_next plus a back-pointer (le_prev) to the
 * previous element's le_next slot, so removal needs no head pointer.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
/* Sanity checks: panic as early as possible on corrupted links. */
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
	if ((head)->lh_first &&						\
	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_OP(elm, field)					\
	if ((elm)->field.le_next &&					\
	    (elm)->field.le_next->field.le_prev !=			\
	    &(elm)->field.le_next)					\
		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.le_prev != (elm))				\
		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
/* Poison removed links so use-after-remove faults quickly. */
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
	(elm)->field.le_next = (void *)1L;				\
	(elm)->field.le_prev = (void *)1L;
#else
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_LIST_OP(elm, field)
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define	LIST_INIT(head) do {						\
	(head)->lh_first = NULL;					\
} while (/*CONSTCOND*/0)

#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
		(listelm)->field.le_next->field.le_prev =		\
		    &(elm)->field.le_next;				\
	(listelm)->field.le_next = (elm);				\
	(elm)->field.le_prev = &(listelm)->field.le_next;		\
} while (/*CONSTCOND*/0)

#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	(elm)->field.le_next = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &(elm)->field.le_next;		\
} while (/*CONSTCOND*/0)

#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
} while (/*CONSTCOND*/0)

/* O(1): the back-pointer makes the head unnecessary for removal. */
#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev = 			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
} while (/*CONSTCOND*/0)

#define	LIST_FOREACH(var, head, field)					\
	for ((var) = ((head)->lh_first);				\
	    (var);							\
	    (var) = ((var)->field.le_next))

/* Safe variant: caches the successor so (var) may be removed/freed. */
#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

/*
 * List access methods.
 */
#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
#define	LIST_FIRST(head)		((head)->lh_first)
#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
/*
 * Singly-linked List definitions.
 *
 * Minimal overhead (one pointer per element); arbitrary removal is
 * O(n) because the predecessor must be found by walking from the head.
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}

/*
 * Singly-linked List functions.
 */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
} while (/*CONSTCOND*/0)

#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
} while (/*CONSTCOND*/0)

#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
} while (/*CONSTCOND*/0)

/* Caller must ensure the list is non-empty. */
#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
} while (/*CONSTCOND*/0)

/* O(n): walks from the head to find elm's predecessor. */
#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while(curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
} while (/*CONSTCOND*/0)

/* Unlink the element following slistelm (which must exist). */
#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
	(slistelm)->field.sle_next =					\
	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
} while (/*CONSTCOND*/0)

#define	SLIST_FOREACH(var, head, field)					\
	for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

/* Safe variant: caches the successor so (var) may be removed/freed. */
#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = SLIST_FIRST((head));				\
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

/*
 * Singly-linked List access methods.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
/*
 * Singly-linked Tail queue declarations.
 *
 * Like SLIST but the head also tracks the address of the last
 * element's next pointer, giving O(1) insertion at the tail.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue functions.
 */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
} while (/*CONSTCOND*/0)

/* Caller must ensure the queue is non-empty. */
#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first;		\
} while (/*CONSTCOND*/0)

/* O(n): walks from the head to find elm's predecessor. */
#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->stqh_first == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->stqh_first;		\
		while (curelm->field.stqe_next != (elm))		\
			curelm = curelm->field.stqe_next;		\
		if ((curelm->field.stqe_next =				\
			curelm->field.stqe_next->field.stqe_next) == NULL) \
			    (head)->stqh_last = &(curelm)->field.stqe_next; \
	}								\
} while (/*CONSTCOND*/0)

#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->stqh_first);				\
		(var);							\
		(var) = ((var)->field.stqe_next))

/* Safe variant: caches the successor so (var) may be removed/freed. */
#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

/* Append head2 onto head1 and reset head2 to empty; O(1). */
#define	STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (/*CONSTCOND*/0)

/* Recover the last element from stqh_last via offsetof arithmetic. */
#define	STAILQ_LAST(head, type, field)					\
	(STAILQ_EMPTY((head)) ?						\
		NULL :							\
	        ((struct type *)(void *)				\
		((char *)((head)->stqh_last) - offsetof(struct type, field))))

/*
 * Singly-linked Tail queue access methods.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
/*
 * Simple queue definitions.
 *
 * Historic NetBSD equivalent of STAILQ: singly linked with a tail
 * pointer in the head for O(1) insertion at either end of the queue.
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}

/*
 * Simple queue functions.
 */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.sqe_next = NULL;					\
	*(head)->sqh_last = (elm);					\
	(head)->sqh_last = &(elm)->field.sqe_next;			\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(listelm)->field.sqe_next = (elm);				\
} while (/*CONSTCOND*/0)

/* Caller must ensure the queue is non-empty. */
#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
} while (/*CONSTCOND*/0)

/* O(n): walks from the head to find elm's predecessor. */
#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			    (head)->sqh_last = &(curelm)->field.sqe_next; \
	}								\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->sqh_first);				\
	    (var);							\
	    (var) = ((var)->field.sqe_next))

/* Safe variant: caches the successor so (var) may be removed/freed. */
#define	SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
	for ((var) = ((head)->sqh_first);				\
	    (var) && ((next = ((var)->field.sqe_next)), 1);		\
	    (var) = (next))

/* Append head2 onto head1 and reset head2 to empty; O(1). */
#define	SIMPLEQ_CONCAT(head1, head2) do {				\
	if (!SIMPLEQ_EMPTY((head2))) {					\
		*(head1)->sqh_last = (head2)->sqh_first;		\
		(head1)->sqh_last = (head2)->sqh_last;			\
		SIMPLEQ_INIT((head2));					\
	}								\
} while (/*CONSTCOND*/0)

/* Recover the last element from sqh_last via offsetof arithmetic. */
#define	SIMPLEQ_LAST(head, type, field)					\
	(SIMPLEQ_EMPTY((head)) ?					\
		NULL :							\
	        ((struct type *)(void *)				\
		((char *)((head)->sqh_last) - offsetof(struct type, field))))

/*
 * Simple queue access methods.
 */
#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
/*
 * Tail queue definitions.
 *
 * Doubly linked with a tail pointer: O(1) insertion/removal anywhere
 * and traversal in either direction.  The _TAILQ_* forms take a
 * "qual" argument (e.g. volatile) applied to the element pointers.
 */
#define	_TAILQ_HEAD(name, type, qual)					\
struct name {								\
	qual type *tqh_first;		/* first element */		\
	qual type *qual *tqh_last;	/* addr of last next element */	\
}
#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)

#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define	_TAILQ_ENTRY(type, qual)					\
struct {								\
	qual type *tqe_next;		/* next element */		\
	qual type *qual *tqe_prev;	/* address of previous next element */\
}
#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
/* Sanity checks: panic as early as possible on corrupted links. */
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
	if ((head)->tqh_first &&					\
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
	if (*(head)->tqh_last != NULL)					\
		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
	if ((elm)->field.tqe_next &&					\
	    (elm)->field.tqe_next->field.tqe_prev !=			\
	    &(elm)->field.tqe_next)					\
		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
	if ((elm)->field.tqe_next == NULL &&				\
	    (head)->tqh_last != &(elm)->field.tqe_next)			\
		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
		      (head), (elm), __FILE__, __LINE__);
/* Poison removed links so use-after-remove faults quickly. */
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
	(elm)->field.tqe_next = (void *)1L;				\
	(elm)->field.tqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define	QUEUEDEBUG_TAILQ_OP(elm, field)
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define	TAILQ_INIT(head) do {						\
	(head)->tqh_first = NULL;					\
	(head)->tqh_last = &(head)->tqh_first;				\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
		(head)->tqh_first->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(head)->tqh_first = (elm);					\
	(elm)->field.tqe_prev = &(head)->tqh_first;			\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
	(elm)->field.tqe_next = NULL;					\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &(elm)->field.tqe_next;			\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(listelm)->field.tqe_next = (elm);				\
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (/*CONSTCOND*/0)

#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
} while (/*CONSTCOND*/0)

#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = ((head)->tqh_first);				\
	    (var);							\
	    (var) = ((var)->field.tqe_next))

/* Safe variant: caches the successor so (var) may be removed/freed. */
#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
	for ((var) = ((head)->tqh_first);				\
	    (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1);	\
	    (var) = (next))

/*
 * Reverse traversal punning trick: tqe_prev points at a "next"
 * slot, which is reinterpreted as a head to reach tqh_last.
 */
#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
	    (var);							\
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((prev) = TAILQ_PREV((var), headname, field), 1);\
	    (var) = (prev))

/* Append head2 onto head1 and reset head2 to empty; O(1). */
#define	TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;\
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
	}								\
} while (/*CONSTCOND*/0)

/*
 * Tail queue access methods.
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

#define	TAILQ_LAST(head, headname)					\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
590 * Circular queue definitions.
592 #if defined(_KERNEL) && defined(QUEUEDEBUG)
593 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
594 if ((head)->cqh_first != (void *)(head) && \
595 (head)->cqh_first->field.cqe_prev != (void *)(head)) \
596 panic("CIRCLEQ head forw %p %s:%d", (head), \
597 __FILE__, __LINE__); \
598 if ((head)->cqh_last != (void *)(head) && \
599 (head)->cqh_last->field.cqe_next != (void *)(head)) \
600 panic("CIRCLEQ head back %p %s:%d", (head), \
601 __FILE__, __LINE__);
602 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
603 if ((elm)->field.cqe_next == (void *)(head)) { \
604 if ((head)->cqh_last != (elm)) \
605 panic("CIRCLEQ elm last %p %s:%d", (elm), \
606 __FILE__, __LINE__); \
607 } else { \
608 if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
609 panic("CIRCLEQ elm forw %p %s:%d", (elm), \
610 __FILE__, __LINE__); \
612 if ((elm)->field.cqe_prev == (void *)(head)) { \
613 if ((head)->cqh_first != (elm)) \
614 panic("CIRCLEQ elm first %p %s:%d", (elm), \
615 __FILE__, __LINE__); \
616 } else { \
617 if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
618 panic("CIRCLEQ elm prev %p %s:%d", (elm), \
619 __FILE__, __LINE__); \
621 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
622 (elm)->field.cqe_next = (void *)1L; \
623 (elm)->field.cqe_prev = (void *)1L;
624 #else
625 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
626 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
627 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
628 #endif
630 #define CIRCLEQ_HEAD(name, type) \
631 struct name { \
632 struct type *cqh_first; /* first element */ \
633 struct type *cqh_last; /* last element */ \
636 #define CIRCLEQ_HEAD_INITIALIZER(head) \
637 { (void *)&head, (void *)&head }
639 #define CIRCLEQ_ENTRY(type) \
640 struct { \
641 struct type *cqe_next; /* next element */ \
642 struct type *cqe_prev; /* previous element */ \
646 * Circular queue functions.
648 #define CIRCLEQ_INIT(head) do { \
649 (head)->cqh_first = (void *)(head); \
650 (head)->cqh_last = (void *)(head); \
651 } while (/*CONSTCOND*/0)
653 #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
654 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
655 QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
656 (elm)->field.cqe_next = (listelm)->field.cqe_next; \
657 (elm)->field.cqe_prev = (listelm); \
658 if ((listelm)->field.cqe_next == (void *)(head)) \
659 (head)->cqh_last = (elm); \
660 else \
661 (listelm)->field.cqe_next->field.cqe_prev = (elm); \
662 (listelm)->field.cqe_next = (elm); \
663 } while (/*CONSTCOND*/0)
665 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
666 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
667 QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
668 (elm)->field.cqe_next = (listelm); \
669 (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
670 if ((listelm)->field.cqe_prev == (void *)(head)) \
671 (head)->cqh_first = (elm); \
672 else \
673 (listelm)->field.cqe_prev->field.cqe_next = (elm); \
674 (listelm)->field.cqe_prev = (elm); \
675 } while (/*CONSTCOND*/0)
677 #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
678 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
679 (elm)->field.cqe_next = (head)->cqh_first; \
680 (elm)->field.cqe_prev = (void *)(head); \
681 if ((head)->cqh_last == (void *)(head)) \
682 (head)->cqh_last = (elm); \
683 else \
684 (head)->cqh_first->field.cqe_prev = (elm); \
685 (head)->cqh_first = (elm); \
686 } while (/*CONSTCOND*/0)
688 #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
689 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
690 (elm)->field.cqe_next = (void *)(head); \
691 (elm)->field.cqe_prev = (head)->cqh_last; \
692 if ((head)->cqh_first == (void *)(head)) \
693 (head)->cqh_first = (elm); \
694 else \
695 (head)->cqh_last->field.cqe_next = (elm); \
696 (head)->cqh_last = (elm); \
697 } while (/*CONSTCOND*/0)
699 #define CIRCLEQ_REMOVE(head, elm, field) do { \
700 QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
701 QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
702 if ((elm)->field.cqe_next == (void *)(head)) \
703 (head)->cqh_last = (elm)->field.cqe_prev; \
704 else \
705 (elm)->field.cqe_next->field.cqe_prev = \
706 (elm)->field.cqe_prev; \
707 if ((elm)->field.cqe_prev == (void *)(head)) \
708 (head)->cqh_first = (elm)->field.cqe_next; \
709 else \
710 (elm)->field.cqe_prev->field.cqe_next = \
711 (elm)->field.cqe_next; \
712 QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
713 } while (/*CONSTCOND*/0)
715 #define CIRCLEQ_FOREACH(var, head, field) \
716 for ((var) = ((head)->cqh_first); \
717 (var) != (const void *)(head); \
718 (var) = ((var)->field.cqe_next))
720 #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
721 for ((var) = ((head)->cqh_last); \
722 (var) != (const void *)(head); \
723 (var) = ((var)->field.cqe_prev))
726 * Circular queue access methods.
728 #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
729 #define CIRCLEQ_FIRST(head) ((head)->cqh_first)
730 #define CIRCLEQ_LAST(head) ((head)->cqh_last)
731 #define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
732 #define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
734 #define CIRCLEQ_LOOP_NEXT(head, elm, field) \
735 (((elm)->field.cqe_next == (void *)(head)) \
736 ? ((head)->cqh_first) \
737 : (elm->field.cqe_next))
738 #define CIRCLEQ_LOOP_PREV(head, elm, field) \
739 (((elm)->field.cqe_prev == (void *)(head)) \
740 ? ((head)->cqh_last) \
741 : (elm->field.cqe_prev))
743 #endif /* !_SYS_QUEUE_H_ */