4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright 2011 Jason King. All rights reserved.
29 #include "libuutil_common.h"
/* Translate between a user object pointer and its embedded list node. */
#define	ELEM_TO_NODE(lp, e) \
	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define	NODE_TO_ELEM(lp, n) \
	((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * uu_list_index_ts define a location for insertion.  They are simply a
 * pointer to the object after the insertion point.  We store a mark
 * in the low-bits of the index, to help prevent mistakes.
 *
 * When debugging, the index mark changes on every insert and delete, to
 * catch stale references.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 1)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)

/*
 * A node which is initialized but not on any list has its uln_next set to
 * this marker: the owning pool's address with the low bit set.  Since real
 * node pointers are aligned, the marker can never collide with one.
 */
#define	POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))
60 static uu_list_pool_t uu_null_lpool
= { &uu_null_lpool
, &uu_null_lpool
};
61 static pthread_mutex_t uu_lpool_list_lock
= PTHREAD_MUTEX_INITIALIZER
;
64 uu_list_pool_create(const char *name
, size_t objsize
,
65 size_t nodeoffset
, uu_compare_fn_t
*compare_func
, uint32_t flags
)
67 uu_list_pool_t
*pp
, *next
, *prev
;
70 uu_check_name(name
, UU_NAME_DOMAIN
) == -1 ||
71 nodeoffset
+ sizeof (uu_list_node_t
) > objsize
) {
72 uu_set_error(UU_ERROR_INVALID_ARGUMENT
);
76 if (flags
& ~UU_LIST_POOL_DEBUG
) {
77 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
81 pp
= uu_zalloc(sizeof (uu_list_pool_t
));
83 uu_set_error(UU_ERROR_NO_MEMORY
);
87 (void) strlcpy(pp
->ulp_name
, name
, sizeof (pp
->ulp_name
));
88 pp
->ulp_nodeoffset
= nodeoffset
;
89 pp
->ulp_objsize
= objsize
;
90 pp
->ulp_cmp
= compare_func
;
91 if (flags
& UU_LIST_POOL_DEBUG
)
93 pp
->ulp_last_index
= 0;
95 (void) pthread_mutex_init(&pp
->ulp_lock
, NULL
);
97 pp
->ulp_null_list
.ul_next_enc
= UU_PTR_ENCODE(&pp
->ulp_null_list
);
98 pp
->ulp_null_list
.ul_prev_enc
= UU_PTR_ENCODE(&pp
->ulp_null_list
);
100 (void) pthread_mutex_lock(&uu_lpool_list_lock
);
101 pp
->ulp_next
= next
= &uu_null_lpool
;
102 pp
->ulp_prev
= prev
= next
->ulp_prev
;
105 (void) pthread_mutex_unlock(&uu_lpool_list_lock
);
111 uu_list_pool_destroy(uu_list_pool_t
*pp
)
114 if (pp
->ulp_null_list
.ul_next_enc
!=
115 UU_PTR_ENCODE(&pp
->ulp_null_list
) ||
116 pp
->ulp_null_list
.ul_prev_enc
!=
117 UU_PTR_ENCODE(&pp
->ulp_null_list
)) {
118 uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
119 "outstanding lists, or is corrupt.\n",
120 (int)sizeof (pp
->ulp_name
), pp
->ulp_name
,
124 (void) pthread_mutex_lock(&uu_lpool_list_lock
);
125 pp
->ulp_next
->ulp_prev
= pp
->ulp_prev
;
126 pp
->ulp_prev
->ulp_next
= pp
->ulp_next
;
127 (void) pthread_mutex_unlock(&uu_lpool_list_lock
);
134 uu_list_node_init(void *base
, uu_list_node_t
*np_arg
, uu_list_pool_t
*pp
)
136 uu_list_node_impl_t
*np
= (uu_list_node_impl_t
*)np_arg
;
139 uintptr_t offset
= (uintptr_t)np
- (uintptr_t)base
;
140 if (offset
+ sizeof (*np
) > pp
->ulp_objsize
) {
141 uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
142 "offset %ld doesn't fit in object (size %ld)\n",
143 base
, (void *)np
, (void *)pp
, pp
->ulp_name
,
144 (long)offset
, (long)pp
->ulp_objsize
);
146 if (offset
!= pp
->ulp_nodeoffset
) {
147 uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
148 "offset %ld doesn't match pool's offset (%ld)\n",
149 base
, (void *)np
, (void *)pp
, pp
->ulp_name
,
150 (long)offset
, (long)pp
->ulp_objsize
);
153 np
->uln_next
= POOL_TO_MARKER(pp
);
158 uu_list_node_fini(void *base
, uu_list_node_t
*np_arg
, uu_list_pool_t
*pp
)
160 uu_list_node_impl_t
*np
= (uu_list_node_impl_t
*)np_arg
;
163 if (np
->uln_next
== NULL
&&
164 np
->uln_prev
== NULL
) {
165 uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
166 "node already finied\n",
167 base
, (void *)np_arg
, (void *)pp
, pp
->ulp_name
);
169 if (np
->uln_next
!= POOL_TO_MARKER(pp
) ||
170 np
->uln_prev
!= NULL
) {
171 uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
172 "node corrupt or on list\n",
173 base
, (void *)np_arg
, (void *)pp
, pp
->ulp_name
);
181 uu_list_create(uu_list_pool_t
*pp
, void *parent
, uint32_t flags
)
183 uu_list_t
*lp
, *next
, *prev
;
185 if (flags
& ~(UU_LIST_DEBUG
| UU_LIST_SORTED
)) {
186 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
190 if ((flags
& UU_LIST_SORTED
) && pp
->ulp_cmp
== NULL
) {
192 uu_panic("uu_list_create(%p, ...): requested "
193 "UU_LIST_SORTED, but pool has no comparison func\n",
195 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
199 lp
= uu_zalloc(sizeof (*lp
));
201 uu_set_error(UU_ERROR_NO_MEMORY
);
206 lp
->ul_parent_enc
= UU_PTR_ENCODE(parent
);
207 lp
->ul_offset
= pp
->ulp_nodeoffset
;
208 lp
->ul_debug
= pp
->ulp_debug
|| (flags
& UU_LIST_DEBUG
);
209 lp
->ul_sorted
= (flags
& UU_LIST_SORTED
);
211 lp
->ul_index
= (pp
->ulp_last_index
= INDEX_NEXT(pp
->ulp_last_index
));
213 lp
->ul_null_node
.uln_next
= &lp
->ul_null_node
;
214 lp
->ul_null_node
.uln_prev
= &lp
->ul_null_node
;
216 lp
->ul_null_walk
.ulw_next
= &lp
->ul_null_walk
;
217 lp
->ul_null_walk
.ulw_prev
= &lp
->ul_null_walk
;
219 (void) pthread_mutex_lock(&pp
->ulp_lock
);
220 next
= &pp
->ulp_null_list
;
221 prev
= UU_PTR_DECODE(next
->ul_prev_enc
);
222 lp
->ul_next_enc
= UU_PTR_ENCODE(next
);
223 lp
->ul_prev_enc
= UU_PTR_ENCODE(prev
);
224 next
->ul_prev_enc
= UU_PTR_ENCODE(lp
);
225 prev
->ul_next_enc
= UU_PTR_ENCODE(lp
);
226 (void) pthread_mutex_unlock(&pp
->ulp_lock
);
232 uu_list_destroy(uu_list_t
*lp
)
234 uu_list_pool_t
*pp
= lp
->ul_pool
;
237 if (lp
->ul_null_node
.uln_next
!= &lp
->ul_null_node
||
238 lp
->ul_null_node
.uln_prev
!= &lp
->ul_null_node
) {
239 uu_panic("uu_list_destroy(%p): list not empty\n",
242 if (lp
->ul_numnodes
!= 0) {
243 uu_panic("uu_list_destroy(%p): numnodes is nonzero, "
244 "but list is empty\n", (void *)lp
);
246 if (lp
->ul_null_walk
.ulw_next
!= &lp
->ul_null_walk
||
247 lp
->ul_null_walk
.ulw_prev
!= &lp
->ul_null_walk
) {
248 uu_panic("uu_list_destroy(%p): outstanding walkers\n",
253 (void) pthread_mutex_lock(&pp
->ulp_lock
);
254 UU_LIST_PTR(lp
->ul_next_enc
)->ul_prev_enc
= lp
->ul_prev_enc
;
255 UU_LIST_PTR(lp
->ul_prev_enc
)->ul_next_enc
= lp
->ul_next_enc
;
256 (void) pthread_mutex_unlock(&pp
->ulp_lock
);
257 lp
->ul_prev_enc
= UU_PTR_ENCODE(NULL
);
258 lp
->ul_next_enc
= UU_PTR_ENCODE(NULL
);
264 list_insert(uu_list_t
*lp
, uu_list_node_impl_t
*np
, uu_list_node_impl_t
*prev
,
265 uu_list_node_impl_t
*next
)
268 if (next
->uln_prev
!= prev
|| prev
->uln_next
!= next
)
269 uu_panic("insert(%p): internal error: %p and %p not "
270 "neighbors\n", (void *)lp
, (void *)next
,
273 if (np
->uln_next
!= POOL_TO_MARKER(lp
->ul_pool
) ||
274 np
->uln_prev
!= NULL
) {
275 uu_panic("insert(%p): elem %p node %p corrupt, "
276 "not initialized, or already in a list.\n",
277 (void *)lp
, NODE_TO_ELEM(lp
, np
), (void *)np
);
280 * invalidate outstanding uu_list_index_ts.
282 lp
->ul_index
= INDEX_NEXT(lp
->ul_index
);
293 uu_list_insert(uu_list_t
*lp
, void *elem
, uu_list_index_t idx
)
295 uu_list_node_impl_t
*np
;
297 np
= INDEX_TO_NODE(idx
);
299 np
= &lp
->ul_null_node
;
302 if (!INDEX_VALID(lp
, idx
))
303 uu_panic("uu_list_insert(%p, %p, %p): %s\n",
304 (void *)lp
, elem
, (void *)idx
,
305 INDEX_CHECK(idx
)? "outdated index" :
307 if (np
->uln_prev
== NULL
)
308 uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
309 "index\n", (void *)lp
, elem
, (void *)idx
);
312 list_insert(lp
, ELEM_TO_NODE(lp
, elem
), np
->uln_prev
, np
);
316 uu_list_find(uu_list_t
*lp
, void *elem
, void *private, uu_list_index_t
*out
)
318 int sorted
= lp
->ul_sorted
;
319 uu_compare_fn_t
*func
= lp
->ul_pool
->ulp_cmp
;
320 uu_list_node_impl_t
*np
;
322 uu_set_error(UU_ERROR_NONE
);
327 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
330 for (np
= lp
->ul_null_node
.uln_next
; np
!= &lp
->ul_null_node
;
332 void *ep
= NODE_TO_ELEM(lp
, np
);
333 int cmp
= func(ep
, elem
, private);
336 *out
= NODE_TO_INDEX(lp
, np
);
339 if (sorted
&& cmp
> 0) {
341 *out
= NODE_TO_INDEX(lp
, np
);
346 *out
= NODE_TO_INDEX(lp
, 0);
351 uu_list_nearest_next(uu_list_t
*lp
, uu_list_index_t idx
)
353 uu_list_node_impl_t
*np
= INDEX_TO_NODE(idx
);
356 np
= &lp
->ul_null_node
;
359 if (!INDEX_VALID(lp
, idx
))
360 uu_panic("uu_list_nearest_next(%p, %p): %s\n",
361 (void *)lp
, (void *)idx
,
362 INDEX_CHECK(idx
)? "outdated index" :
364 if (np
->uln_prev
== NULL
)
365 uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
366 "index\n", (void *)lp
, (void *)idx
);
369 if (np
== &lp
->ul_null_node
)
372 return (NODE_TO_ELEM(lp
, np
));
376 uu_list_nearest_prev(uu_list_t
*lp
, uu_list_index_t idx
)
378 uu_list_node_impl_t
*np
= INDEX_TO_NODE(idx
);
381 np
= &lp
->ul_null_node
;
384 if (!INDEX_VALID(lp
, idx
))
385 uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
386 (void *)lp
, (void *)idx
, INDEX_CHECK(idx
)?
387 "outdated index" : "invalid index");
388 if (np
->uln_prev
== NULL
)
389 uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
390 "index\n", (void *)lp
, (void *)idx
);
393 if ((np
= np
->uln_prev
) == &lp
->ul_null_node
)
396 return (NODE_TO_ELEM(lp
, np
));
400 list_walk_init(uu_list_walk_t
*wp
, uu_list_t
*lp
, uint32_t flags
)
402 uu_list_walk_t
*next
, *prev
;
404 int robust
= (flags
& UU_WALK_ROBUST
);
405 int direction
= (flags
& UU_WALK_REVERSE
)? -1 : 1;
407 (void) memset(wp
, 0, sizeof (*wp
));
409 wp
->ulw_robust
= robust
;
410 wp
->ulw_dir
= direction
;
412 wp
->ulw_next_result
= lp
->ul_null_node
.uln_next
;
414 wp
->ulw_next_result
= lp
->ul_null_node
.uln_prev
;
416 if (lp
->ul_debug
|| robust
) {
418 * Add this walker to the list's list of walkers so
419 * uu_list_remove() can advance us if somebody tries to
420 * remove ulw_next_result.
422 wp
->ulw_next
= next
= &lp
->ul_null_walk
;
423 wp
->ulw_prev
= prev
= next
->ulw_prev
;
429 static uu_list_node_impl_t
*
430 list_walk_advance(uu_list_walk_t
*wp
, uu_list_t
*lp
)
432 uu_list_node_impl_t
*np
= wp
->ulw_next_result
;
433 uu_list_node_impl_t
*next
;
435 if (np
== &lp
->ul_null_node
)
438 next
= (wp
->ulw_dir
> 0)? np
->uln_next
: np
->uln_prev
;
440 wp
->ulw_next_result
= next
;
445 list_walk_fini(uu_list_walk_t
*wp
)
447 /* GLXXX debugging? */
448 if (wp
->ulw_next
!= NULL
) {
449 wp
->ulw_next
->ulw_prev
= wp
->ulw_prev
;
450 wp
->ulw_prev
->ulw_next
= wp
->ulw_next
;
455 wp
->ulw_next_result
= NULL
;
459 uu_list_walk_start(uu_list_t
*lp
, uint32_t flags
)
463 if (flags
& ~(UU_WALK_ROBUST
| UU_WALK_REVERSE
)) {
464 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
468 wp
= uu_zalloc(sizeof (*wp
));
470 uu_set_error(UU_ERROR_NO_MEMORY
);
474 list_walk_init(wp
, lp
, flags
);
479 uu_list_walk_next(uu_list_walk_t
*wp
)
481 uu_list_t
*lp
= wp
->ulw_list
;
482 uu_list_node_impl_t
*np
= list_walk_advance(wp
, lp
);
487 return (NODE_TO_ELEM(lp
, np
));
491 uu_list_walk_end(uu_list_walk_t
*wp
)
498 uu_list_walk(uu_list_t
*lp
, uu_walk_fn_t
*func
, void *private, uint32_t flags
)
500 uu_list_node_impl_t
*np
;
502 int status
= UU_WALK_NEXT
;
504 int robust
= (flags
& UU_WALK_ROBUST
);
505 int reverse
= (flags
& UU_WALK_REVERSE
);
507 if (flags
& ~(UU_WALK_ROBUST
| UU_WALK_REVERSE
)) {
508 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
512 if (lp
->ul_debug
|| robust
) {
513 uu_list_walk_t my_walk
;
516 list_walk_init(&my_walk
, lp
, flags
);
517 while (status
== UU_WALK_NEXT
&&
518 (e
= uu_list_walk_next(&my_walk
)) != NULL
)
519 status
= (*func
)(e
, private);
520 list_walk_fini(&my_walk
);
523 for (np
= lp
->ul_null_node
.uln_next
;
524 status
== UU_WALK_NEXT
&& np
!= &lp
->ul_null_node
;
526 status
= (*func
)(NODE_TO_ELEM(lp
, np
), private);
529 for (np
= lp
->ul_null_node
.uln_prev
;
530 status
== UU_WALK_NEXT
&& np
!= &lp
->ul_null_node
;
532 status
= (*func
)(NODE_TO_ELEM(lp
, np
), private);
538 uu_set_error(UU_ERROR_CALLBACK_FAILED
);
543 uu_list_remove(uu_list_t
*lp
, void *elem
)
545 uu_list_node_impl_t
*np
= ELEM_TO_NODE(lp
, elem
);
549 if (np
->uln_prev
== NULL
)
550 uu_panic("uu_list_remove(%p, %p): elem not on list\n",
553 * invalidate outstanding uu_list_index_ts.
555 lp
->ul_index
= INDEX_NEXT(lp
->ul_index
);
559 * robust walkers must be advanced. In debug mode, non-robust
560 * walkers are also on the list. If there are any, it's an error.
562 for (wp
= lp
->ul_null_walk
.ulw_next
; wp
!= &lp
->ul_null_walk
;
564 if (wp
->ulw_robust
) {
565 if (np
== wp
->ulw_next_result
)
566 (void) list_walk_advance(wp
, lp
);
567 } else if (wp
->ulw_next_result
!= NULL
) {
568 uu_panic("uu_list_remove(%p, %p): active non-robust "
569 "walker\n", (void *)lp
, elem
);
573 np
->uln_next
->uln_prev
= np
->uln_prev
;
574 np
->uln_prev
->uln_next
= np
->uln_next
;
578 np
->uln_next
= POOL_TO_MARKER(lp
->ul_pool
);
583 uu_list_teardown(uu_list_t
*lp
, void **cookie
)
588 * XXX: disable list modification until list is empty
590 if (lp
->ul_debug
&& *cookie
!= NULL
)
591 uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
592 (void *)lp
, (void *)cookie
);
594 ep
= uu_list_first(lp
);
596 uu_list_remove(lp
, ep
);
601 uu_list_insert_before(uu_list_t
*lp
, void *target
, void *elem
)
603 uu_list_node_impl_t
*np
= ELEM_TO_NODE(lp
, target
);
606 np
= &lp
->ul_null_node
;
609 if (np
->uln_prev
== NULL
)
610 uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
611 "not currently on a list\n",
612 (void *)lp
, target
, elem
, target
);
616 uu_panic("uu_list_insert_before(%p, ...): list is "
617 "UU_LIST_SORTED\n", (void *)lp
);
618 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
622 list_insert(lp
, ELEM_TO_NODE(lp
, elem
), np
->uln_prev
, np
);
627 uu_list_insert_after(uu_list_t
*lp
, void *target
, void *elem
)
629 uu_list_node_impl_t
*np
= ELEM_TO_NODE(lp
, target
);
632 np
= &lp
->ul_null_node
;
635 if (np
->uln_prev
== NULL
)
636 uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
637 "not currently on a list\n",
638 (void *)lp
, target
, elem
, target
);
642 uu_panic("uu_list_insert_after(%p, ...): list is "
643 "UU_LIST_SORTED\n", (void *)lp
);
644 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
648 list_insert(lp
, ELEM_TO_NODE(lp
, elem
), np
, np
->uln_next
);
653 uu_list_numnodes(uu_list_t
*lp
)
655 return (lp
->ul_numnodes
);
659 uu_list_first(uu_list_t
*lp
)
661 uu_list_node_impl_t
*n
= lp
->ul_null_node
.uln_next
;
662 if (n
== &lp
->ul_null_node
)
664 return (NODE_TO_ELEM(lp
, n
));
668 uu_list_last(uu_list_t
*lp
)
670 uu_list_node_impl_t
*n
= lp
->ul_null_node
.uln_prev
;
671 if (n
== &lp
->ul_null_node
)
673 return (NODE_TO_ELEM(lp
, n
));
677 uu_list_next(uu_list_t
*lp
, void *elem
)
679 uu_list_node_impl_t
*n
= ELEM_TO_NODE(lp
, elem
);
682 if (n
== &lp
->ul_null_node
)
684 return (NODE_TO_ELEM(lp
, n
));
688 uu_list_prev(uu_list_t
*lp
, void *elem
)
690 uu_list_node_impl_t
*n
= ELEM_TO_NODE(lp
, elem
);
693 if (n
== &lp
->ul_null_node
)
695 return (NODE_TO_ELEM(lp
, n
));
699 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
706 (void) pthread_mutex_lock(&uu_lpool_list_lock
);
707 for (pp
= uu_null_lpool
.ulp_next
; pp
!= &uu_null_lpool
;
709 (void) pthread_mutex_lock(&pp
->ulp_lock
);
713 uu_list_release(void)
717 for (pp
= uu_null_lpool
.ulp_next
; pp
!= &uu_null_lpool
;
719 (void) pthread_mutex_unlock(&pp
->ulp_lock
);
720 (void) pthread_mutex_unlock(&uu_lpool_list_lock
);