4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
28 #include "libuutil_common.h"
/*
 * Conversion between user objects and their embedded list nodes: each
 * pool records the fixed byte offset of the uu_list_node_t within the
 * objects it manages, so element <-> node translation is plain pointer
 * arithmetic.
 */
#define	ELEM_TO_NODE(lp, e) \
	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define	NODE_TO_ELEM(lp, n) \
	((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * uu_list_index_ts define a location for insertion.  They are simply a
 * pointer to the object after the insertion point.  We store a mark
 * in the low-bits of the index, to help prevent mistakes.
 *
 * When debugging, the index mark changes on every insert and delete, to
 * catch stale references.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 1)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)

/*
 * Value stored in an unlinked node's uln_next so that membership in the
 * right pool can be sanity-checked; the low bit is set so the marker can
 * never collide with a real (aligned) node pointer.
 */
#define	POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))
59 static uu_list_pool_t uu_null_lpool
= { &uu_null_lpool
, &uu_null_lpool
};
60 static pthread_mutex_t uu_lpool_list_lock
= PTHREAD_MUTEX_INITIALIZER
;
63 uu_list_pool_create(const char *name
, size_t objsize
,
64 size_t nodeoffset
, uu_compare_fn_t
*compare_func
, uint32_t flags
)
66 uu_list_pool_t
*pp
, *next
, *prev
;
69 uu_check_name(name
, UU_NAME_DOMAIN
) == -1 ||
70 nodeoffset
+ sizeof (uu_list_node_t
) > objsize
) {
71 uu_set_error(UU_ERROR_INVALID_ARGUMENT
);
75 if (flags
& ~UU_LIST_POOL_DEBUG
) {
76 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
80 pp
= uu_zalloc(sizeof (uu_list_pool_t
));
82 uu_set_error(UU_ERROR_NO_MEMORY
);
86 (void) strlcpy(pp
->ulp_name
, name
, sizeof (pp
->ulp_name
));
87 pp
->ulp_nodeoffset
= nodeoffset
;
88 pp
->ulp_objsize
= objsize
;
89 pp
->ulp_cmp
= compare_func
;
90 if (flags
& UU_LIST_POOL_DEBUG
)
92 pp
->ulp_last_index
= 0;
94 (void) pthread_mutex_init(&pp
->ulp_lock
, NULL
);
96 pp
->ulp_null_list
.ul_next
= &pp
->ulp_null_list
;
97 pp
->ulp_null_list
.ul_prev
= &pp
->ulp_null_list
;
99 (void) pthread_mutex_lock(&uu_lpool_list_lock
);
100 pp
->ulp_next
= next
= &uu_null_lpool
;
101 pp
->ulp_prev
= prev
= next
->ulp_prev
;
104 (void) pthread_mutex_unlock(&uu_lpool_list_lock
);
110 uu_list_pool_destroy(uu_list_pool_t
*pp
)
113 if (pp
->ulp_null_list
.ul_next
!= &pp
->ulp_null_list
||
114 pp
->ulp_null_list
.ul_prev
!= &pp
->ulp_null_list
) {
115 uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
116 "outstanding lists, or is corrupt.\n",
117 (int)sizeof (pp
->ulp_name
), pp
->ulp_name
,
121 (void) pthread_mutex_lock(&uu_lpool_list_lock
);
122 pp
->ulp_next
->ulp_prev
= pp
->ulp_prev
;
123 pp
->ulp_prev
->ulp_next
= pp
->ulp_next
;
124 (void) pthread_mutex_unlock(&uu_lpool_list_lock
);
131 uu_list_node_init(void *base
, uu_list_node_t
*np_arg
, uu_list_pool_t
*pp
)
133 uu_list_node_impl_t
*np
= (uu_list_node_impl_t
*)np_arg
;
136 uintptr_t offset
= (uintptr_t)np
- (uintptr_t)base
;
137 if (offset
+ sizeof (*np
) > pp
->ulp_objsize
) {
138 uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
139 "offset %ld doesn't fit in object (size %ld)\n",
140 base
, (void *)np
, (void *)pp
, pp
->ulp_name
,
141 (long)offset
, (long)pp
->ulp_objsize
);
143 if (offset
!= pp
->ulp_nodeoffset
) {
144 uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
145 "offset %ld doesn't match pool's offset (%ld)\n",
146 base
, (void *)np
, (void *)pp
, pp
->ulp_name
,
147 (long)offset
, (long)pp
->ulp_objsize
);
150 np
->uln_next
= POOL_TO_MARKER(pp
);
155 uu_list_node_fini(void *base
, uu_list_node_t
*np_arg
, uu_list_pool_t
*pp
)
157 uu_list_node_impl_t
*np
= (uu_list_node_impl_t
*)np_arg
;
160 if (np
->uln_next
== NULL
&&
161 np
->uln_prev
== NULL
) {
162 uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
163 "node already finied\n",
164 base
, (void *)np_arg
, (void *)pp
, pp
->ulp_name
);
166 if (np
->uln_next
!= POOL_TO_MARKER(pp
) ||
167 np
->uln_prev
!= NULL
) {
168 uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
169 "node corrupt or on list\n",
170 base
, (void *)np_arg
, (void *)pp
, pp
->ulp_name
);
178 uu_list_create(uu_list_pool_t
*pp
, void *parent
, uint32_t flags
)
180 uu_list_t
*lp
, *next
, *prev
;
182 if (flags
& ~(UU_LIST_DEBUG
| UU_LIST_SORTED
)) {
183 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
187 if ((flags
& UU_LIST_SORTED
) && pp
->ulp_cmp
== NULL
) {
189 uu_panic("uu_list_create(%p, ...): requested "
190 "UU_LIST_SORTED, but pool has no comparison func\n",
192 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
196 lp
= uu_zalloc(sizeof (*lp
));
198 uu_set_error(UU_ERROR_NO_MEMORY
);
203 lp
->ul_parent
= parent
;
204 lp
->ul_offset
= pp
->ulp_nodeoffset
;
205 lp
->ul_debug
= pp
->ulp_debug
|| (flags
& UU_LIST_DEBUG
);
206 lp
->ul_sorted
= (flags
& UU_LIST_SORTED
);
208 lp
->ul_index
= (pp
->ulp_last_index
= INDEX_NEXT(pp
->ulp_last_index
));
210 lp
->ul_null_node
.uln_next
= &lp
->ul_null_node
;
211 lp
->ul_null_node
.uln_prev
= &lp
->ul_null_node
;
213 lp
->ul_null_walk
.ulw_next
= &lp
->ul_null_walk
;
214 lp
->ul_null_walk
.ulw_prev
= &lp
->ul_null_walk
;
216 (void) pthread_mutex_lock(&pp
->ulp_lock
);
217 next
= &pp
->ulp_null_list
;
218 prev
= next
->ul_prev
;
223 (void) pthread_mutex_unlock(&pp
->ulp_lock
);
229 uu_list_destroy(uu_list_t
*lp
)
231 uu_list_pool_t
*pp
= lp
->ul_pool
;
234 if (lp
->ul_null_node
.uln_next
!= &lp
->ul_null_node
||
235 lp
->ul_null_node
.uln_prev
!= &lp
->ul_null_node
) {
236 uu_panic("uu_list_destroy(%p): list not empty\n",
239 if (lp
->ul_numnodes
!= 0) {
240 uu_panic("uu_list_destroy(%p): numnodes is nonzero, "
241 "but list is empty\n", (void *)lp
);
243 if (lp
->ul_null_walk
.ulw_next
!= &lp
->ul_null_walk
||
244 lp
->ul_null_walk
.ulw_prev
!= &lp
->ul_null_walk
) {
245 uu_panic("uu_list_destroy(%p): outstanding walkers\n",
250 (void) pthread_mutex_lock(&pp
->ulp_lock
);
251 lp
->ul_next
->ul_prev
= lp
->ul_prev
;
252 lp
->ul_prev
->ul_next
= lp
->ul_next
;
253 (void) pthread_mutex_unlock(&pp
->ulp_lock
);
261 list_insert(uu_list_t
*lp
, uu_list_node_impl_t
*np
, uu_list_node_impl_t
*prev
,
262 uu_list_node_impl_t
*next
)
265 if (next
->uln_prev
!= prev
|| prev
->uln_next
!= next
)
266 uu_panic("insert(%p): internal error: %p and %p not "
267 "neighbors\n", (void *)lp
, (void *)next
,
270 if (np
->uln_next
!= POOL_TO_MARKER(lp
->ul_pool
) ||
271 np
->uln_prev
!= NULL
) {
272 uu_panic("insert(%p): elem %p node %p corrupt, "
273 "not initialized, or already in a list.\n",
274 (void *)lp
, NODE_TO_ELEM(lp
, np
), (void *)np
);
277 * invalidate outstanding uu_list_index_ts.
279 lp
->ul_index
= INDEX_NEXT(lp
->ul_index
);
290 uu_list_insert(uu_list_t
*lp
, void *elem
, uu_list_index_t idx
)
292 uu_list_node_impl_t
*np
;
294 np
= INDEX_TO_NODE(idx
);
296 np
= &lp
->ul_null_node
;
299 if (!INDEX_VALID(lp
, idx
))
300 uu_panic("uu_list_insert(%p, %p, %p): %s\n",
301 (void *)lp
, elem
, (void *)idx
,
302 INDEX_CHECK(idx
)? "outdated index" :
304 if (np
->uln_prev
== NULL
)
305 uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
306 "index\n", (void *)lp
, elem
, (void *)idx
);
309 list_insert(lp
, ELEM_TO_NODE(lp
, elem
), np
->uln_prev
, np
);
313 uu_list_find(uu_list_t
*lp
, void *elem
, void *private, uu_list_index_t
*out
)
315 int sorted
= lp
->ul_sorted
;
316 uu_compare_fn_t
*func
= lp
->ul_pool
->ulp_cmp
;
317 uu_list_node_impl_t
*np
;
322 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
325 for (np
= lp
->ul_null_node
.uln_next
; np
!= &lp
->ul_null_node
;
327 void *ep
= NODE_TO_ELEM(lp
, np
);
328 int cmp
= func(ep
, elem
, private);
331 *out
= NODE_TO_INDEX(lp
, np
);
334 if (sorted
&& cmp
> 0) {
336 *out
= NODE_TO_INDEX(lp
, np
);
341 *out
= NODE_TO_INDEX(lp
, 0);
346 uu_list_nearest_next(uu_list_t
*lp
, uu_list_index_t idx
)
348 uu_list_node_impl_t
*np
= INDEX_TO_NODE(idx
);
351 np
= &lp
->ul_null_node
;
354 if (!INDEX_VALID(lp
, idx
))
355 uu_panic("uu_list_nearest_next(%p, %p): %s\n",
356 (void *)lp
, (void *)idx
,
357 INDEX_CHECK(idx
)? "outdated index" :
359 if (np
->uln_prev
== NULL
)
360 uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
361 "index\n", (void *)lp
, (void *)idx
);
364 if (np
== &lp
->ul_null_node
)
367 return (NODE_TO_ELEM(lp
, np
));
371 uu_list_nearest_prev(uu_list_t
*lp
, uu_list_index_t idx
)
373 uu_list_node_impl_t
*np
= INDEX_TO_NODE(idx
);
376 np
= &lp
->ul_null_node
;
379 if (!INDEX_VALID(lp
, idx
))
380 uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
381 (void *)lp
, (void *)idx
, INDEX_CHECK(idx
)?
382 "outdated index" : "invalid index");
383 if (np
->uln_prev
== NULL
)
384 uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
385 "index\n", (void *)lp
, (void *)idx
);
388 if ((np
= np
->uln_prev
) == &lp
->ul_null_node
)
391 return (NODE_TO_ELEM(lp
, np
));
395 list_walk_init(uu_list_walk_t
*wp
, uu_list_t
*lp
, uint32_t flags
)
397 uu_list_walk_t
*next
, *prev
;
399 int robust
= (flags
& UU_WALK_ROBUST
);
400 int direction
= (flags
& UU_WALK_REVERSE
)? -1 : 1;
402 (void) memset(wp
, 0, sizeof (*wp
));
404 wp
->ulw_robust
= robust
;
405 wp
->ulw_dir
= direction
;
407 wp
->ulw_next_result
= lp
->ul_null_node
.uln_next
;
409 wp
->ulw_next_result
= lp
->ul_null_node
.uln_prev
;
411 if (lp
->ul_debug
|| robust
) {
413 * Add this walker to the list's list of walkers so
414 * uu_list_remove() can advance us if somebody tries to
415 * remove ulw_next_result.
417 wp
->ulw_next
= next
= &lp
->ul_null_walk
;
418 wp
->ulw_prev
= prev
= next
->ulw_prev
;
424 static uu_list_node_impl_t
*
425 list_walk_advance(uu_list_walk_t
*wp
, uu_list_t
*lp
)
427 uu_list_node_impl_t
*np
= wp
->ulw_next_result
;
428 uu_list_node_impl_t
*next
;
430 if (np
== &lp
->ul_null_node
)
433 next
= (wp
->ulw_dir
> 0)? np
->uln_next
: np
->uln_prev
;
435 wp
->ulw_next_result
= next
;
440 list_walk_fini(uu_list_walk_t
*wp
)
442 /* GLXXX debugging? */
443 if (wp
->ulw_next
!= NULL
) {
444 wp
->ulw_next
->ulw_prev
= wp
->ulw_prev
;
445 wp
->ulw_prev
->ulw_next
= wp
->ulw_next
;
450 wp
->ulw_next_result
= NULL
;
454 uu_list_walk_start(uu_list_t
*lp
, uint32_t flags
)
458 if (flags
& ~(UU_WALK_ROBUST
| UU_WALK_REVERSE
)) {
459 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
463 wp
= uu_zalloc(sizeof (*wp
));
465 uu_set_error(UU_ERROR_NO_MEMORY
);
469 list_walk_init(wp
, lp
, flags
);
474 uu_list_walk_next(uu_list_walk_t
*wp
)
476 uu_list_t
*lp
= wp
->ulw_list
;
477 uu_list_node_impl_t
*np
= list_walk_advance(wp
, lp
);
482 return (NODE_TO_ELEM(lp
, np
));
486 uu_list_walk_end(uu_list_walk_t
*wp
)
493 uu_list_walk(uu_list_t
*lp
, uu_walk_fn_t
*func
, void *private, uint32_t flags
)
495 uu_list_node_impl_t
*np
;
497 int status
= UU_WALK_NEXT
;
499 int robust
= (flags
& UU_WALK_ROBUST
);
500 int reverse
= (flags
& UU_WALK_REVERSE
);
502 if (flags
& ~(UU_WALK_ROBUST
| UU_WALK_REVERSE
)) {
503 uu_set_error(UU_ERROR_UNKNOWN_FLAG
);
507 if (lp
->ul_debug
|| robust
) {
508 uu_list_walk_t
*my_walk
;
511 my_walk
= uu_zalloc(sizeof (*my_walk
));
515 list_walk_init(my_walk
, lp
, flags
);
516 while (status
== UU_WALK_NEXT
&&
517 (e
= uu_list_walk_next(my_walk
)) != NULL
)
518 status
= (*func
)(e
, private);
519 list_walk_fini(my_walk
);
524 for (np
= lp
->ul_null_node
.uln_next
;
525 status
== UU_WALK_NEXT
&& np
!= &lp
->ul_null_node
;
527 status
= (*func
)(NODE_TO_ELEM(lp
, np
), private);
530 for (np
= lp
->ul_null_node
.uln_prev
;
531 status
== UU_WALK_NEXT
&& np
!= &lp
->ul_null_node
;
533 status
= (*func
)(NODE_TO_ELEM(lp
, np
), private);
539 uu_set_error(UU_ERROR_CALLBACK_FAILED
);
544 uu_list_remove(uu_list_t
*lp
, void *elem
)
546 uu_list_node_impl_t
*np
= ELEM_TO_NODE(lp
, elem
);
550 if (np
->uln_prev
== NULL
)
551 uu_panic("uu_list_remove(%p, %p): elem not on list\n",
554 * invalidate outstanding uu_list_index_ts.
556 lp
->ul_index
= INDEX_NEXT(lp
->ul_index
);
560 * robust walkers must be advanced. In debug mode, non-robust
561 * walkers are also on the list. If there are any, it's an error.
563 for (wp
= lp
->ul_null_walk
.ulw_next
; wp
!= &lp
->ul_null_walk
;
565 if (wp
->ulw_robust
) {
566 if (np
== wp
->ulw_next_result
)
567 (void) list_walk_advance(wp
, lp
);
568 } else if (wp
->ulw_next_result
!= NULL
) {
569 uu_panic("uu_list_remove(%p, %p): active non-robust "
570 "walker\n", (void *)lp
, elem
);
574 np
->uln_next
->uln_prev
= np
->uln_prev
;
575 np
->uln_prev
->uln_next
= np
->uln_next
;
579 np
->uln_next
= POOL_TO_MARKER(lp
->ul_pool
);
584 uu_list_teardown(uu_list_t
*lp
, void **cookie
)
589 * XXX: disable list modification until list is empty
591 if (lp
->ul_debug
&& *cookie
!= NULL
)
592 uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
593 (void *)lp
, (void *)cookie
);
595 ep
= uu_list_first(lp
);
597 uu_list_remove(lp
, ep
);
602 uu_list_insert_before(uu_list_t
*lp
, void *target
, void *elem
)
604 uu_list_node_impl_t
*np
= ELEM_TO_NODE(lp
, target
);
607 np
= &lp
->ul_null_node
;
610 if (np
->uln_prev
== NULL
)
611 uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
612 "not currently on a list\n",
613 (void *)lp
, target
, elem
, target
);
617 uu_panic("uu_list_insert_before(%p, ...): list is "
618 "UU_LIST_SORTED\n", (void *)lp
);
619 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
623 list_insert(lp
, ELEM_TO_NODE(lp
, elem
), np
->uln_prev
, np
);
628 uu_list_insert_after(uu_list_t
*lp
, void *target
, void *elem
)
630 uu_list_node_impl_t
*np
= ELEM_TO_NODE(lp
, target
);
633 np
= &lp
->ul_null_node
;
636 if (np
->uln_prev
== NULL
)
637 uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
638 "not currently on a list\n",
639 (void *)lp
, target
, elem
, target
);
643 uu_panic("uu_list_insert_after(%p, ...): list is "
644 "UU_LIST_SORTED\n", (void *)lp
);
645 uu_set_error(UU_ERROR_NOT_SUPPORTED
);
649 list_insert(lp
, ELEM_TO_NODE(lp
, elem
), np
, np
->uln_next
);
654 uu_list_numnodes(uu_list_t
*lp
)
656 return (lp
->ul_numnodes
);
660 uu_list_first(uu_list_t
*lp
)
662 uu_list_node_impl_t
*n
= lp
->ul_null_node
.uln_next
;
663 if (n
== &lp
->ul_null_node
)
665 return (NODE_TO_ELEM(lp
, n
));
669 uu_list_last(uu_list_t
*lp
)
671 uu_list_node_impl_t
*n
= lp
->ul_null_node
.uln_prev
;
672 if (n
== &lp
->ul_null_node
)
674 return (NODE_TO_ELEM(lp
, n
));
678 uu_list_next(uu_list_t
*lp
, void *elem
)
680 uu_list_node_impl_t
*n
= ELEM_TO_NODE(lp
, elem
);
683 if (n
== &lp
->ul_null_node
)
685 return (NODE_TO_ELEM(lp
, n
));
689 uu_list_prev(uu_list_t
*lp
, void *elem
)
691 uu_list_node_impl_t
*n
= ELEM_TO_NODE(lp
, elem
);
694 if (n
== &lp
->ul_null_node
)
696 return (NODE_TO_ELEM(lp
, n
));
700 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
707 (void) pthread_mutex_lock(&uu_lpool_list_lock
);
708 for (pp
= uu_null_lpool
.ulp_next
; pp
!= &uu_null_lpool
;
710 (void) pthread_mutex_lock(&pp
->ulp_lock
);
714 uu_list_release(void)
718 for (pp
= uu_null_lpool
.ulp_next
; pp
!= &uu_null_lpool
;
720 (void) pthread_mutex_unlock(&pp
->ulp_lock
);
721 (void) pthread_mutex_unlock(&uu_lpool_list_lock
);