/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#define	ELEM_TO_NODE(lp, e) \
	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define	NODE_TO_ELEM(lp, n) \
	((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * uu_list_index_ts define a location for insertion.  They are simply a
 * pointer to the object after the insertion point.  We store a mark
 * in the low-bits of the index, to help prevent mistakes.
 *
 * When debugging, the index mark changes on every insert and delete, to
 * catch stale references.
 */

#define	INDEX_MAX		(sizeof (uintptr_t) - 1)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)
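
/*
 * Illustrative sketch (not part of the original source): because list
 * nodes are at least pointer-aligned, the low bits of a node address
 * are zero, leaving room for the mark.  On a 64-bit system, for
 * example, INDEX_MAX is 7 and the mark lives in the low three bits:
 *
 *	uu_list_index_t idx = NODE_TO_INDEX(lp, np);
 *	assert(INDEX_VALID(lp, idx));		// mark matches ul_index
 *	assert(INDEX_TO_NODE(idx) == np);	// pointer recovered intact
 */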

#define	POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))

static uu_list_pool_t	uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
static pthread_mutex_t	uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;

uu_list_pool_t *
uu_list_pool_create(const char *name, size_t objsize,
    size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_list_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_list_node_t) > objsize) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_LIST_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_list_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
	pp->ulp_nodeoffset = nodeoffset;
	pp->ulp_objsize = objsize;
	pp->ulp_cmp = compare_func;
	if (flags & UU_LIST_POOL_DEBUG)
		pp->ulp_debug = 1;
	pp->ulp_last_index = 0;

	(void) pthread_mutex_init(&pp->ulp_lock, NULL);

	pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list);
	pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list);

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next = next = &uu_null_lpool;
	pp->ulp_prev = prev = next->ulp_prev;
	next->ulp_prev = pp;
	prev->ulp_next = pp;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);

	return (pp);
}
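
/*
 * Illustrative sketch (not part of the original source): a caller
 * defines an object type with an embedded uu_list_node_t, then creates
 * a pool describing that layout.  "myobj_t" and "myobj_cmp" are
 * hypothetical.
 *
 *	typedef struct myobj {
 *		int		mo_value;
 *		uu_list_node_t	mo_node;
 *	} myobj_t;
 *
 *	uu_list_pool_t *pool = uu_list_pool_create("myobj_pool",
 *	    sizeof (myobj_t), offsetof(myobj_t, mo_node), myobj_cmp,
 *	    UU_LIST_POOL_DEBUG);
 */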

void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
	if (pp->ulp_debug) {
		if (pp->ulp_null_list.ul_next_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list) ||
		    pp->ulp_null_list.ul_prev_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list)) {
			uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding lists, or is corrupt.\n",
			    (int)sizeof (pp->ulp_name), pp->ulp_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next->ulp_prev = pp->ulp_prev;
	pp->ulp_prev->ulp_next = pp->ulp_next;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
	pp->ulp_prev = NULL;
	pp->ulp_next = NULL;
	uu_free(pp);
}

void
uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->ulp_objsize) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_objsize);
		}
		if (offset != pp->ulp_nodeoffset) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_nodeoffset);
		}
	}
	np->uln_next = POOL_TO_MARKER(pp);
	np->uln_prev = NULL;
}
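
/*
 * Illustrative sketch (not part of the original source): every object
 * must have its embedded node initialized before it can be placed on a
 * list, and finalized after its final removal.  "myobj_t" is
 * hypothetical.
 *
 *	myobj_t *obj = calloc(1, sizeof (myobj_t));
 *	uu_list_node_init(obj, &obj->mo_node, pool);
 *	...
 *	uu_list_node_fini(obj, &obj->mo_node, pool);
 *	free(obj);
 */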

void
uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		if (np->uln_next == NULL &&
		    np->uln_prev == NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node already finied\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
		if (np->uln_next != POOL_TO_MARKER(pp) ||
		    np->uln_prev != NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt or on list\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
	}
	np->uln_next = NULL;
	np->uln_prev = NULL;
}

uu_list_t *
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
{
	uu_list_t *lp, *next, *prev;

	if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
		if (pp->ulp_debug)
			uu_panic("uu_list_create(%p, ...): requested "
			    "UU_LIST_SORTED, but pool has no comparison func\n",
			    (void *)pp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}

	lp = uu_zalloc(sizeof (*lp));
	if (lp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	lp->ul_pool = pp;
	lp->ul_parent_enc = UU_PTR_ENCODE(parent);
	lp->ul_offset = pp->ulp_nodeoffset;
	lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
	lp->ul_sorted = (flags & UU_LIST_SORTED);
	lp->ul_numnodes = 0;
	lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));

	lp->ul_null_node.uln_next = &lp->ul_null_node;
	lp->ul_null_node.uln_prev = &lp->ul_null_node;

	lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
	lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;

	(void) pthread_mutex_lock(&pp->ulp_lock);
	next = &pp->ulp_null_list;
	prev = UU_PTR_DECODE(next->ul_prev_enc);
	lp->ul_next_enc = UU_PTR_ENCODE(next);
	lp->ul_prev_enc = UU_PTR_ENCODE(prev);
	next->ul_prev_enc = UU_PTR_ENCODE(lp);
	prev->ul_next_enc = UU_PTR_ENCODE(lp);
	(void) pthread_mutex_unlock(&pp->ulp_lock);

	return (lp);
}
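
/*
 * Illustrative sketch (not part of the original source): creating and
 * destroying a sorted list from an existing pool.  "pool" is
 * hypothetical; on failure, uu_error() describes what went wrong.
 *
 *	uu_list_t *list = uu_list_create(pool, NULL, UU_LIST_SORTED);
 *	if (list == NULL)
 *		(void) fprintf(stderr, "%s\n", uu_strerror(uu_error()));
 *	...
 *	uu_list_destroy(list);
 */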

void
uu_list_destroy(uu_list_t *lp)
{
	uu_list_pool_t *pp = lp->ul_pool;

	if (lp->ul_debug) {
		if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
		    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
			uu_panic("uu_list_destroy(%p): list not empty\n",
			    (void *)lp);
		}
		if (lp->ul_numnodes != 0) {
			uu_panic("uu_list_destroy(%p): numnodes is nonzero, "
			    "but list is empty\n", (void *)lp);
		}
		if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
		    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
			uu_panic("uu_list_destroy(%p): outstanding walkers\n",
			    (void *)lp);
		}
	}

	(void) pthread_mutex_lock(&pp->ulp_lock);
	UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc;
	UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc;
	(void) pthread_mutex_unlock(&pp->ulp_lock);
	lp->ul_prev_enc = UU_PTR_ENCODE(NULL);
	lp->ul_next_enc = UU_PTR_ENCODE(NULL);
	lp->ul_pool = NULL;
	uu_free(lp);
}

static void
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
    uu_list_node_impl_t *next)
{
	if (lp->ul_debug) {
		if (next->uln_prev != prev || prev->uln_next != next)
			uu_panic("insert(%p): internal error: %p and %p not "
			    "neighbors\n", (void *)lp, (void *)next,
			    (void *)prev);

		if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
		    np->uln_prev != NULL) {
			uu_panic("insert(%p): elem %p node %p corrupt, "
			    "not initialized, or already in a list.\n",
			    (void *)lp, NODE_TO_ELEM(lp, np), (void *)np);
		}
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}
	np->uln_next = next;
	np->uln_prev = prev;
	next->uln_prev = np;
	prev->uln_next = np;

	lp->ul_numnodes++;
}

void
uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
{
	uu_list_node_impl_t *np;

	np = INDEX_TO_NODE(idx);
	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_insert(%p, %p, %p): %s\n",
			    (void *)lp, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
			    "index\n", (void *)lp, elem, (void *)idx);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
}
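
/*
 * Illustrative sketch (not part of the original source): the usual
 * pattern for a sorted list is to ask uu_list_find() for an index and,
 * if no matching element already exists, insert at that index.  "list"
 * and "obj" are hypothetical.
 *
 *	uu_list_index_t idx;
 *	if (uu_list_find(list, obj, NULL, &idx) == NULL)
 *		uu_list_insert(list, obj, idx);
 */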

void *
uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
{
	int sorted = lp->ul_sorted;
	uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
	uu_list_node_impl_t *np;

	if (func == NULL) {
		if (out != NULL)
			*out = 0;
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}
	for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
	    np = np->uln_next) {
		void *ep = NODE_TO_ELEM(lp, np);
		int cmp = func(ep, elem, private);
		if (cmp == 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (ep);
		}
		if (sorted && cmp > 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (NULL);
		}
	}
	if (out != NULL)
		*out = NODE_TO_INDEX(lp, 0);
	return (NULL);
}

void *
uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_next(%p, %p): %s\n",
			    (void *)lp, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if (np == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}

void *
uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
			    (void *)lp, (void *)idx, INDEX_CHECK(idx)?
			    "outdated index" : "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if ((np = np->uln_prev) == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}

static void
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->ulw_list = lp;
	wp->ulw_robust = robust;
	wp->ulw_dir = direction;
	if (direction > 0)
		wp->ulw_next_result = lp->ul_null_node.uln_next;
	else
		wp->ulw_next_result = lp->ul_null_node.uln_prev;

	if (lp->ul_debug || robust) {
		/*
		 * Add this walker to the list's list of walkers so
		 * uu_list_remove() can advance us if somebody tries to
		 * remove ulw_next_result.
		 */
		wp->ulw_next = next = &lp->ul_null_walk;
		wp->ulw_prev = prev = next->ulw_prev;
		next->ulw_prev = wp;
		prev->ulw_next = wp;
	}
}

static uu_list_node_impl_t *
list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
{
	uu_list_node_impl_t *np = wp->ulw_next_result;
	uu_list_node_impl_t *next;

	if (np == &lp->ul_null_node)
		return (NULL);

	next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;

	wp->ulw_next_result = next;
	return (np);
}

static void
list_walk_fini(uu_list_walk_t *wp)
{
	/* GLXXX debugging? */
	if (wp->ulw_next != NULL) {
		wp->ulw_next->ulw_prev = wp->ulw_prev;
		wp->ulw_prev->ulw_next = wp->ulw_next;
		wp->ulw_next = NULL;
		wp->ulw_prev = NULL;
	}
	wp->ulw_list = NULL;
	wp->ulw_next_result = NULL;
}

uu_list_walk_t *
uu_list_walk_start(uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	list_walk_init(wp, lp, flags);
	return (wp);
}

void *
uu_list_walk_next(uu_list_walk_t *wp)
{
	uu_list_t *lp = wp->ulw_list;
	uu_list_node_impl_t *np = list_walk_advance(wp, lp);

	if (np == NULL)
		return (NULL);

	return (NODE_TO_ELEM(lp, np));
}

void
uu_list_walk_end(uu_list_walk_t *wp)
{
	list_walk_fini(wp);
	uu_free(wp);
}
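
/*
 * Illustrative sketch (not part of the original source): a robust walk
 * may safely remove the element it is visiting.  "list" and
 * should_remove() are hypothetical.
 *
 *	uu_list_walk_t *walk;
 *	void *ep;
 *
 *	if ((walk = uu_list_walk_start(list, UU_WALK_ROBUST)) != NULL) {
 *		while ((ep = uu_list_walk_next(walk)) != NULL) {
 *			if (should_remove(ep))
 *				uu_list_remove(list, ep);
 *		}
 *		uu_list_walk_end(walk);
 *	}
 */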

int
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	uu_list_node_impl_t *np;

	int status = UU_WALK_NEXT;

	int robust = (flags & UU_WALK_ROBUST);
	int reverse = (flags & UU_WALK_REVERSE);

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	if (lp->ul_debug || robust) {
		uu_list_walk_t my_walk;
		void *e;

		list_walk_init(&my_walk, lp, flags);
		while (status == UU_WALK_NEXT &&
		    (e = uu_list_walk_next(&my_walk)) != NULL)
			status = (*func)(e, private);
		list_walk_fini(&my_walk);
	} else {
		if (!reverse) {
			for (np = lp->ul_null_node.uln_next;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_next)
				status = (*func)(NODE_TO_ELEM(lp, np), private);
		} else {
			for (np = lp->ul_null_node.uln_prev;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_prev)
				status = (*func)(NODE_TO_ELEM(lp, np), private);
		}
	}

	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
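
/*
 * Illustrative sketch (not part of the original source): a walk
 * callback returns UU_WALK_NEXT to continue or UU_WALK_DONE to stop.
 * "list" is hypothetical.
 *
 *	static int
 *	count_cb(void *ep, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	int n = 0;
 *	(void) uu_list_walk(list, count_cb, &n, 0);
 */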

void
uu_list_remove(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
	uu_list_walk_t *wp;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_remove(%p, %p): elem not on list\n",
			    (void *)lp, elem);
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}

	/*
	 * robust walkers must be advanced. In debug mode, non-robust
	 * walkers are also on the list. If there are any, it's an error.
	 */
	for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
	    wp = wp->ulw_next) {
		if (wp->ulw_robust) {
			if (np == wp->ulw_next_result)
				(void) list_walk_advance(wp, lp);
		} else if (wp->ulw_next_result != NULL) {
			uu_panic("uu_list_remove(%p, %p): active non-robust "
			    "walker\n", (void *)lp, elem);
		}
	}

	np->uln_next->uln_prev = np->uln_prev;
	np->uln_prev->uln_next = np->uln_next;

	lp->ul_numnodes--;

	np->uln_next = POOL_TO_MARKER(lp->ul_pool);
	np->uln_prev = NULL;
}

void *
uu_list_teardown(uu_list_t *lp, void **cookie)
{
	void *ep;

	/*
	 * XXX: disable list modification until list is empty
	 */
	if (lp->ul_debug && *cookie != NULL)
		uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
		    (void *)lp, (void *)cookie);

	ep = uu_list_first(lp);
	if (ep)
		uu_list_remove(lp, ep);
	return (ep);
}
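
/*
 * Illustrative sketch (not part of the original source): draining a
 * list at shutdown.  "list" is hypothetical; each element's node would
 * be finalized with uu_list_node_fini() before the element is freed.
 *
 *	void *cookie = NULL;
 *	void *ep;
 *
 *	while ((ep = uu_list_teardown(list, &cookie)) != NULL)
 *		free(ep);
 */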

int
uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_before(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
	return (0);
}

int
uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_after(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
	return (0);
}

size_t
uu_list_numnodes(uu_list_t *lp)
{
	return (lp->ul_numnodes);
}

void *
uu_list_first(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_last(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_next(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_prev(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}
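
/*
 * Illustrative sketch (not part of the original source): simple
 * traversal with the accessors, safe only if the list is not modified
 * during the loop.  "list" and process() are hypothetical.
 *
 *	void *ep;
 *
 *	for (ep = uu_list_first(list); ep != NULL;
 *	    ep = uu_list_next(list, ep))
 *		process(ep);
 */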

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_list_lockup(void)
{
	uu_list_pool_t *pp;

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_lock(&pp->ulp_lock);
}

void
uu_list_release(void)
{
	uu_list_pool_t *pp;

	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_unlock(&pp->ulp_lock);
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
}