/*	$NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */

#ifndef	_SYS_QUEUE_H_
#define	_SYS_QUEUE_H_

#include <sys/null.h>

/*
 * This file defines five types of data structures: singly-linked lists,
 * lists, simple queues, tail queues, and circular queues.
 *
 * A singly-linked list is headed by a single forward pointer. The
 * elements are singly linked for minimum space and pointer manipulation
 * overhead at the expense of O(n) removal for arbitrary elements. New
 * elements can be added to the list after an existing element or at the
 * head of the list. Elements being removed from the head of the list
 * should use the explicit macro for this purpose for optimum
 * efficiency. A singly-linked list may only be traversed in the forward
 * direction. Singly-linked lists are ideal for applications with large
 * datasets and few or no removals or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before
 * or after an existing element or at the head of the list. A list
 * may only be traversed in the forward direction.
 *
 * A simple queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are singly
 * linked to save space, so elements can only be removed from the
 * head of the list. New elements can be added to the list after
 * an existing element, at the head of the list, or at the end of the
 * list. A simple queue may only be traversed in the forward direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list. A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has a more
 * complex end of list detection.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 * Brief usage sketches also follow each group of macros below.
 */

/*
 * List definitions.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
	if ((head)->lh_first &&						\
	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_OP(elm, field)					\
	if ((elm)->field.le_next &&					\
	    (elm)->field.le_next->field.le_prev !=			\
	    &(elm)->field.le_next)					\
		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.le_prev != (elm))				\
		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
	(elm)->field.le_next = (void *)1L;				\
	(elm)->field.le_prev = (void *)1L;
#else
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_LIST_OP(elm, field)
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define	LIST_INIT(head) do {						\
	(head)->lh_first = NULL;					\
} while (/*CONSTCOND*/0)

#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
		(listelm)->field.le_next->field.le_prev =		\
		    &(elm)->field.le_next;				\
	(listelm)->field.le_next = (elm);				\
	(elm)->field.le_prev = &(listelm)->field.le_next;		\
} while (/*CONSTCOND*/0)

#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	(elm)->field.le_next = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &(elm)->field.le_next;		\
} while (/*CONSTCOND*/0)

#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
} while (/*CONSTCOND*/0)

#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev =			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
} while (/*CONSTCOND*/0)

#define	LIST_FOREACH(var, head, field)					\
	for ((var) = ((head)->lh_first);				\
	    (var);							\
	    (var) = ((var)->field.le_next))

#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

/*
 * List access methods.
 */
#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
#define	LIST_FIRST(head)		((head)->lh_first)
#define	LIST_NEXT(elm, field)		((elm)->field.le_next)

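/*
 * Usage sketch (editor's addition, not part of the original header):
 * a doubly-linked list built from the LIST_* macros above. The names
 * "entry", "listhead", and "example_list" are illustrative; malloc()
 * and free() require <stdlib.h>, and error checking is omitted.
 *
 *	struct entry {
 *		int value;
 *		LIST_ENTRY(entry) entries;
 *	};
 *	LIST_HEAD(listhead, entry);
 *
 *	void
 *	example_list(void)
 *	{
 *		struct listhead head;
 *		struct entry *n1, *n2, *np;
 *
 *		LIST_INIT(&head);
 *
 *		n1 = malloc(sizeof(struct entry));
 *		LIST_INSERT_HEAD(&head, n1, entries);
 *
 *		n2 = malloc(sizeof(struct entry));
 *		LIST_INSERT_AFTER(n1, n2, entries);
 *
 *		LIST_FOREACH(np, &head, entries)
 *			np->value = 0;
 *
 *		while (!LIST_EMPTY(&head)) {
 *			np = LIST_FIRST(&head);
 *			LIST_REMOVE(np, entries);
 *			free(np);
 *		}
 *	}
 */
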
/*
 * Singly-linked List definitions.
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}

/*
 * Singly-linked List functions.
 */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
} while (/*CONSTCOND*/0)

#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
} while (/*CONSTCOND*/0)

#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
} while (/*CONSTCOND*/0)

#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while(curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
} while (/*CONSTCOND*/0)

#define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
	(slistelm)->field.sle_next =					\
	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
} while (/*CONSTCOND*/0)

#define	SLIST_FOREACH(var, head, field)					\
	for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = SLIST_FIRST((head));				\
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

/*
 * Singly-linked List access methods.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)

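/*
 * Usage sketch (editor's addition): the SLIST_* macros used as a LIFO
 * stack, as suggested in the description above. The names "entry",
 * "stackhead", and "example_slist" are illustrative.
 *
 *	struct entry {
 *		int value;
 *		SLIST_ENTRY(entry) entries;
 *	};
 *	SLIST_HEAD(stackhead, entry) head = SLIST_HEAD_INITIALIZER(head);
 *
 *	void
 *	example_slist(void)
 *	{
 *		struct entry *n1, *np;
 *
 *		n1 = malloc(sizeof(struct entry));
 *		n1->value = 1;
 *		SLIST_INSERT_HEAD(&head, n1, entries);
 *
 *		np = SLIST_FIRST(&head);
 *		SLIST_REMOVE_HEAD(&head, entries);
 *		free(np);
 *	}
 *
 * Note that SLIST_REMOVE_HEAD() only unlinks the element; the caller
 * must keep a pointer to it (np above) in order to free it.
 */
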
/*
 * Singly-linked Tail queue declarations.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue functions.
 */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
} while (/*CONSTCOND*/0)

#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first;		\
} while (/*CONSTCOND*/0)

#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->stqh_first == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->stqh_first;		\
		while (curelm->field.stqe_next != (elm))		\
			curelm = curelm->field.stqe_next;		\
		if ((curelm->field.stqe_next =				\
			curelm->field.stqe_next->field.stqe_next) == NULL) \
			(head)->stqh_last = &(curelm)->field.stqe_next; \
	}								\
} while (/*CONSTCOND*/0)

#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->stqh_first);				\
	    (var);							\
	    (var) = ((var)->field.stqe_next))

#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define	STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (/*CONSTCOND*/0)

#define	STAILQ_LAST(head, type, field)					\
	(STAILQ_EMPTY((head)) ?						\
	    NULL :							\
	    ((struct type *)(void *)					\
	    ((char *)((head)->stqh_last) - offsetof(struct type, field))))

/*
 * Singly-linked Tail queue access methods.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)

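/*
 * Usage sketch (editor's addition): the STAILQ_* macros used as a FIFO
 * queue, inserting at the tail and removing from the head. The names
 * "entry", "stailhead", and "example_stailq" are illustrative.
 *
 *	struct entry {
 *		int value;
 *		STAILQ_ENTRY(entry) entries;
 *	};
 *	STAILQ_HEAD(stailhead, entry) head = STAILQ_HEAD_INITIALIZER(head);
 *
 *	void
 *	example_stailq(void)
 *	{
 *		struct entry *n1, *np;
 *
 *		n1 = malloc(sizeof(struct entry));
 *		n1->value = 1;
 *		STAILQ_INSERT_TAIL(&head, n1, entries);
 *
 *		while (!STAILQ_EMPTY(&head)) {
 *			np = STAILQ_FIRST(&head);
 *			STAILQ_REMOVE_HEAD(&head, entries);
 *			free(np);
 *		}
 *	}
 */
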
/*
 * Simple queue definitions.
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}

/*
 * Simple queue functions.
 */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.sqe_next = NULL;					\
	*(head)->sqh_last = (elm);					\
	(head)->sqh_last = &(elm)->field.sqe_next;			\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(listelm)->field.sqe_next = (elm);				\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			(head)->sqh_last = &(curelm)->field.sqe_next;	\
	}								\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->sqh_first);				\
	    (var);							\
	    (var) = ((var)->field.sqe_next))

#define	SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
	for ((var) = ((head)->sqh_first);				\
	    (var) && ((next = ((var)->field.sqe_next)), 1);		\
	    (var) = (next))

#define	SIMPLEQ_CONCAT(head1, head2) do {				\
	if (!SIMPLEQ_EMPTY((head2))) {					\
		*(head1)->sqh_last = (head2)->sqh_first;		\
		(head1)->sqh_last = (head2)->sqh_last;			\
		SIMPLEQ_INIT((head2));					\
	}								\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_LAST(head, type, field)					\
	(SIMPLEQ_EMPTY((head)) ?					\
	    NULL :							\
	    ((struct type *)(void *)					\
	    ((char *)((head)->sqh_last) - offsetof(struct type, field))))

/*
 * Simple queue access methods.
 */
#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)

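/*
 * Usage sketch (editor's addition): the SIMPLEQ_* macros parallel the
 * STAILQ_* macros above, differing only in naming. The names "entry",
 * "simplehead", and "example_simpleq" are illustrative.
 *
 *	struct entry {
 *		int value;
 *		SIMPLEQ_ENTRY(entry) entries;
 *	};
 *	SIMPLEQ_HEAD(simplehead, entry) head = SIMPLEQ_HEAD_INITIALIZER(head);
 *
 *	void
 *	example_simpleq(void)
 *	{
 *		struct entry *n1, *np;
 *
 *		n1 = malloc(sizeof(struct entry));
 *		SIMPLEQ_INSERT_TAIL(&head, n1, entries);
 *
 *		SIMPLEQ_FOREACH(np, &head, entries)
 *			np->value = 0;
 *
 *		while (!SIMPLEQ_EMPTY(&head)) {
 *			np = SIMPLEQ_FIRST(&head);
 *			SIMPLEQ_REMOVE_HEAD(&head, entries);
 *			free(np);
 *		}
 *	}
 */
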
/*
 * Tail queue definitions.
 */
#define	_TAILQ_HEAD(name, type, qual)					\
struct name {								\
	qual type *tqh_first;		/* first element */		\
	qual type *qual *tqh_last;	/* addr of last next element */	\
}
#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)

#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define	_TAILQ_ENTRY(type, qual)					\
struct {								\
	qual type *tqe_next;		/* next element */		\
	qual type *qual *tqe_prev;	/* address of previous next element */\
}
#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
	if ((head)->tqh_first &&					\
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
	if (*(head)->tqh_last != NULL)					\
		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
	if ((elm)->field.tqe_next &&					\
	    (elm)->field.tqe_next->field.tqe_prev !=			\
	    &(elm)->field.tqe_next)					\
		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
	if ((elm)->field.tqe_next == NULL &&				\
	    (head)->tqh_last != &(elm)->field.tqe_next)			\
		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
		    (head), (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
	(elm)->field.tqe_next = (void *)1L;				\
	(elm)->field.tqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define	QUEUEDEBUG_TAILQ_OP(elm, field)
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define	TAILQ_INIT(head) do {						\
	(head)->tqh_first = NULL;					\
	(head)->tqh_last = &(head)->tqh_first;				\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
		(head)->tqh_first->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(head)->tqh_first = (elm);					\
	(elm)->field.tqe_prev = &(head)->tqh_first;			\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
	(elm)->field.tqe_next = NULL;					\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &(elm)->field.tqe_next;			\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(listelm)->field.tqe_next = (elm);				\
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (/*CONSTCOND*/0)

#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev =			\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
} while (/*CONSTCOND*/0)

#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = ((head)->tqh_first);				\
	    (var);							\
	    (var) = ((var)->field.tqe_next))

#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
	for ((var) = ((head)->tqh_first);				\
	    (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1);	\
	    (var) = (next))

#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
	    (var);							\
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((prev) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (prev))

#define	TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;\
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
	}								\
} while (/*CONSTCOND*/0)

/*
 * Tail queue access methods.
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

#define	TAILQ_LAST(head, headname)					\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))

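/*
 * Usage sketch (editor's addition): a tail queue traversed in both
 * directions; TAILQ_FOREACH_REVERSE() needs the head structure's tag
 * ("tailhead" here) to recover the tail pointer. The names "entry",
 * "tailhead", and "example_tailq" are illustrative.
 *
 *	struct entry {
 *		int value;
 *		TAILQ_ENTRY(entry) entries;
 *	};
 *	TAILQ_HEAD(tailhead, entry) head = TAILQ_HEAD_INITIALIZER(head);
 *
 *	void
 *	example_tailq(void)
 *	{
 *		struct entry *n1, *np;
 *
 *		n1 = malloc(sizeof(struct entry));
 *		TAILQ_INSERT_TAIL(&head, n1, entries);
 *
 *		TAILQ_FOREACH(np, &head, entries)
 *			np->value = 0;
 *		TAILQ_FOREACH_REVERSE(np, &head, tailhead, entries)
 *			np->value++;
 *
 *		while ((np = TAILQ_FIRST(&head)) != NULL) {
 *			TAILQ_REMOVE(&head, np, entries);
 *			free(np);
 *		}
 *	}
 */
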
/*
 * Circular queue definitions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
	if ((head)->cqh_first != (void *)(head) &&			\
	    (head)->cqh_first->field.cqe_prev != (void *)(head))	\
		panic("CIRCLEQ head forw %p %s:%d", (head),		\
		    __FILE__, __LINE__);				\
	if ((head)->cqh_last != (void *)(head) &&			\
	    (head)->cqh_last->field.cqe_next != (void *)(head))	\
		panic("CIRCLEQ head back %p %s:%d", (head),		\
		    __FILE__, __LINE__);
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
	if ((elm)->field.cqe_next == (void *)(head)) {			\
		if ((head)->cqh_last != (elm))				\
			panic("CIRCLEQ elm last %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
			panic("CIRCLEQ elm forw %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	}								\
	if ((elm)->field.cqe_prev == (void *)(head)) {			\
		if ((head)->cqh_first != (elm))				\
			panic("CIRCLEQ elm first %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
			panic("CIRCLEQ elm prev %p %s:%d", (elm),	\
			    __FILE__, __LINE__);			\
	}
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
	(elm)->field.cqe_next = (void *)1L;				\
	(elm)->field.cqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
#endif

#define	CIRCLEQ_HEAD(name, type)					\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}

#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
	{ (void *)&head, (void *)&head }

#define	CIRCLEQ_ENTRY(type)						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}

/*
 * Circular queue functions.
 */
#define	CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm);				\
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
	if ((listelm)->field.cqe_prev == (void *)(head))		\
		(head)->cqh_first = (elm);				\
	else								\
		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
	(listelm)->field.cqe_prev = (elm);				\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (head)->cqh_first;			\
	(elm)->field.cqe_prev = (void *)(head);				\
	if ((head)->cqh_last == (void *)(head))				\
		(head)->cqh_last = (elm);				\
	else								\
		(head)->cqh_first->field.cqe_prev = (elm);		\
	(head)->cqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (void *)(head);				\
	(elm)->field.cqe_prev = (head)->cqh_last;			\
	if ((head)->cqh_first == (void *)(head))			\
		(head)->cqh_first = (elm);				\
	else								\
		(head)->cqh_last->field.cqe_next = (elm);		\
	(head)->cqh_last = (elm);					\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->cqh_first);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_next))

#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
	for ((var) = ((head)->cqh_last);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)

#define CIRCLEQ_LOOP_NEXT(head, elm, field)				\
	(((elm)->field.cqe_next == (void *)(head))			\
	    ? ((head)->cqh_first)					\
	    : (elm->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field)				\
	(((elm)->field.cqe_prev == (void *)(head))			\
	    ? ((head)->cqh_last)					\
	    : (elm->field.cqe_prev))

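/*
 * Usage sketch (editor's addition): a circular queue; an empty queue
 * points back at the head, so traversal terminates when the head
 * address reappears rather than on NULL. The names "entry",
 * "circlehead", and "example_circleq" are illustrative.
 *
 *	struct entry {
 *		int value;
 *		CIRCLEQ_ENTRY(entry) entries;
 *	};
 *	CIRCLEQ_HEAD(circlehead, entry) head;
 *
 *	void
 *	example_circleq(void)
 *	{
 *		struct entry *n1, *np;
 *
 *		CIRCLEQ_INIT(&head);
 *
 *		n1 = malloc(sizeof(struct entry));
 *		CIRCLEQ_INSERT_HEAD(&head, n1, entries);
 *
 *		CIRCLEQ_FOREACH(np, &head, entries)
 *			np->value = 0;
 *
 *		while (!CIRCLEQ_EMPTY(&head)) {
 *			np = CIRCLEQ_FIRST(&head);
 *			CIRCLEQ_REMOVE(&head, np, entries);
 *			free(np);
 *		}
 *	}
 */
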
#endif	/* !_SYS_QUEUE_H_ */