/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/avl.h>
static uu_avl_pool_t	uu_null_apool = { &uu_null_apool, &uu_null_apool };
static pthread_mutex_t	uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * The index marks change on every insert and delete, to catch stale
 * references.
 *
 * We leave the low bit alone, since the avl code uses it.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 2)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)

#define	INDEX_DECODE(i)		((i) & ~INDEX_MAX)
#define	INDEX_ENCODE(p, n)	(((n) & ~INDEX_MAX) | (p)->ua_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ua_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)
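/*
 * Worked example (illustrative, assuming a 64-bit uintptr_t, so that
 * INDEX_MAX == 6 and the mark cycles through 2, 4, 6):
 *
 *	ap->ua_index is 4 when uu_avl_find() encodes an index, so the
 *	returned idx has (idx & 6) == 4 and INDEX_VALID(ap, idx) holds.
 *	After any insert or remove, ua_index advances to 6, so
 *	(idx & 6) == 4 != 6 and the stale index is rejected.
 *
 * INDEX_CHECK() then distinguishes a stale-but-real index (nonzero
 * mark) from a value that was never an encoded index at all.
 */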
/*
 * When an element is inactive (not in a tree), we keep a marked pointer to
 * its containing pool in its first word, and a NULL pointer in its second.
 *
 * On insert, we use these to verify that it comes from the correct pool.
 */
#define	NODE_ARRAY(p, n)	((uintptr_t *)((uintptr_t)(n) + \
				    (p)->uap_nodeoffset))

#define	POOL_TO_MARKER(pp)	(((uintptr_t)(pp) | 1))

#define	DEAD_MARKER		0xc4
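/*
 * Sketch of the node-word lifecycle (explanatory, not exhaustive; "na"
 * is the node's word array as returned by NODE_ARRAY()):
 *
 *	uu_avl_node_init():	na[0] = POOL_TO_MARKER(pp), na[1] = 0
 *	uu_avl_insert():	the avl code reuses the words for live
 *				tree linkage
 *	uu_avl_remove():	na[0] = POOL_TO_MARKER(pp), na[1] = 0
 *	uu_avl_node_fini():	every word is set to DEAD_MARKER
 *
 * The debug checks in uu_avl_insert() and uu_avl_node_fini() test these
 * patterns to catch double-inserts, double-finis, and pool mixups.
 */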
uu_avl_pool_t *
uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
    uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_avl_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_avl_node_t) > objsize ||
	    compare_func == NULL) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_AVL_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_avl_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
	pp->uap_nodeoffset = nodeoffset;
	pp->uap_objsize = objsize;
	pp->uap_cmp = compare_func;
	if (flags & UU_AVL_POOL_DEBUG)
		pp->uap_debug = 1;
	pp->uap_last_index = 0;

	(void) pthread_mutex_init(&pp->uap_lock, NULL);

	pp->uap_null_avl.ua_next = &pp->uap_null_avl;
	pp->uap_null_avl.ua_prev = &pp->uap_null_avl;

	/* insert at the tail of the global pool list */
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next = next = &uu_null_apool;
	pp->uap_prev = prev = next->uap_prev;
	next->uap_prev = pp;
	prev->uap_next = pp;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);

	return (pp);
}
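/*
 * Example usage (illustrative only; "struct my_obj" and my_compare()
 * are hypothetical caller-side names, not part of libuutil; offsetof()
 * requires <stddef.h>):
 *
 *	struct my_obj {
 *		int mo_key;
 *		uu_avl_node_t mo_node;
 *	};
 *
 *	static int
 *	my_compare(const void *l, const void *r, void *arg)
 *	{
 *		const struct my_obj *a = l, *b = r;
 *		return (a->mo_key < b->mo_key ? -1 :
 *		    a->mo_key > b->mo_key ? 1 : 0);
 *	}
 *
 *	pool = uu_avl_pool_create("my_objs", sizeof (struct my_obj),
 *	    offsetof(struct my_obj, mo_node), my_compare, 0);
 */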
void
uu_avl_pool_destroy(uu_avl_pool_t *pp)
{
	if (pp->uap_debug) {
		if (pp->uap_null_avl.ua_next != &pp->uap_null_avl ||
		    pp->uap_null_avl.ua_prev != &pp->uap_null_avl) {
			uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding avls, or is corrupt.\n",
			    (int)sizeof (pp->uap_name), pp->uap_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next->uap_prev = pp->uap_prev;
	pp->uap_prev->uap_next = pp->uap_next;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
	(void) pthread_mutex_destroy(&pp->uap_lock);
	pp->uap_name[0] = '\0';
	uu_free(pp);
}
void
uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->uap_objsize) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_objsize);
		}
		if (offset != pp->uap_nodeoffset) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_nodeoffset);
		}
	}

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}
void
uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node already fini'd\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
		if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt, in tree, or in different pool\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
	}

	na[0] = DEAD_MARKER;
	na[1] = DEAD_MARKER;
	na[2] = DEAD_MARKER;
}
struct uu_avl_node_compare_info {
	uu_compare_fn_t	*ac_compare;
	void		*ac_private;
	void		*ac_right;
	void		*ac_found;
};

static int
uu_avl_node_compare(const void *l, const void *r)
{
	struct uu_avl_node_compare_info *info =
	    (struct uu_avl_node_compare_info *)l;

	int res = info->ac_compare(r, info->ac_right, info->ac_private);

	if (res == 0) {
		if (info->ac_found == NULL)
			info->ac_found = (void *)r;
		return (-1);
	}
	if (res < 0)
		return (1);
	return (-1);
}
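/*
 * Explanatory note: avl_find() is handed the info struct as its search
 * value, so every comparison lands here with l == &info and r == a tree
 * node.  The user's comparator is consulted, but we never return 0: the
 * first matching node is remembered in ac_found and the descent
 * continues, so avl_find() always "fails" (which uu_avl_find() asserts
 * in debug mode) while still computing a usable insertion index for its
 * "out" pointer.  uu_avl_find() then reports ac_found instead of
 * avl_find()'s result.
 */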
uu_avl_t *
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
{
	uu_avl_t *ap, *next, *prev;

	if (flags & ~UU_AVL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	ap = uu_zalloc(sizeof (*ap));
	if (ap == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	ap->ua_pool = pp;
	ap->ua_parent = parent;
	ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
	ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));

	avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
	    pp->uap_nodeoffset);

	ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
	ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;

	/* insert at the tail of the pool's avl list */
	(void) pthread_mutex_lock(&pp->uap_lock);
	next = &pp->uap_null_avl;
	prev = next->ua_prev;
	ap->ua_next = next;
	ap->ua_prev = prev;
	next->ua_prev = ap;
	prev->ua_next = ap;
	(void) pthread_mutex_unlock(&pp->uap_lock);

	return (ap);
}
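/*
 * Example usage (illustrative; continues the hypothetical "pool" from
 * the uu_avl_pool_create() example above):
 *
 *	uu_avl_t *avl = uu_avl_create(pool, NULL, 0);
 *	if (avl == NULL)
 *		...  handle uu_error() ...
 *	...
 *	uu_avl_destroy(avl);	(the avl must be empty by this point)
 *	uu_avl_pool_destroy(pool);
 */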
void
uu_avl_destroy(uu_avl_t *ap)
{
	uu_avl_pool_t *pp = ap->ua_pool;

	if (ap->ua_debug) {
		if (avl_numnodes(&ap->ua_tree) != 0) {
			uu_panic("uu_avl_destroy(%p): tree not empty\n",
			    (void *)ap);
		}
		if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
		    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
			uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
			    (void *)ap);
		}
	}
	(void) pthread_mutex_lock(&pp->uap_lock);
	ap->ua_next->ua_prev = ap->ua_prev;
	ap->ua_prev->ua_next = ap->ua_next;
	(void) pthread_mutex_unlock(&pp->uap_lock);
	ap->ua_prev = NULL;
	ap->ua_next = NULL;

	ap->ua_pool = NULL;
	avl_destroy(&ap->ua_tree);

	uu_free(ap);
}
size_t
uu_avl_numnodes(uu_avl_t *ap)
{
	return (avl_numnodes(&ap->ua_tree));
}

void *
uu_avl_first(uu_avl_t *ap)
{
	return (avl_first(&ap->ua_tree));
}

void *
uu_avl_last(uu_avl_t *ap)
{
	return (avl_last(&ap->ua_tree));
}

void *
uu_avl_next(uu_avl_t *ap, void *node)
{
	return (AVL_NEXT(&ap->ua_tree, node));
}

void *
uu_avl_prev(uu_avl_t *ap, void *node)
{
	return (AVL_PREV(&ap->ua_tree, node));
}
static void
_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));

	wp->uaw_avl = ap;
	wp->uaw_robust = robust;
	wp->uaw_dir = direction;

	if (direction > 0)
		wp->uaw_next_result = avl_first(&ap->ua_tree);
	else
		wp->uaw_next_result = avl_last(&ap->ua_tree);

	if (ap->ua_debug || robust) {
		wp->uaw_next = next = &ap->ua_null_walk;
		wp->uaw_prev = prev = next->uaw_prev;
		next->uaw_prev = wp;
		prev->uaw_next = wp;
	}
}
static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
	void *np = wp->uaw_next_result;

	avl_tree_t *t = &ap->ua_tree;

	if (np == NULL)
		return (NULL);

	wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
	    AVL_PREV(t, np);

	return (np);
}
static void
_avl_walk_fini(uu_avl_walk_t *wp)
{
	if (wp->uaw_next != NULL) {
		wp->uaw_next->uaw_prev = wp->uaw_prev;
		wp->uaw_prev->uaw_next = wp->uaw_next;
		wp->uaw_next = NULL;
		wp->uaw_prev = NULL;
	}
	wp->uaw_avl = NULL;
	wp->uaw_next_result = NULL;
}
uu_avl_walk_t *
uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	_avl_walk_init(wp, ap, flags);
	return (wp);
}
void *
uu_avl_walk_next(uu_avl_walk_t *wp)
{
	return (_avl_walk_advance(wp, wp->uaw_avl));
}

void
uu_avl_walk_end(uu_avl_walk_t *wp)
{
	_avl_walk_fini(wp);
	uu_free(wp);
}
int
uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	void *e;
	uu_avl_walk_t my_walk;

	int status = UU_WALK_NEXT;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	_avl_walk_init(&my_walk, ap, flags);
	while (status == UU_WALK_NEXT &&
	    (e = _avl_walk_advance(&my_walk, ap)) != NULL)
		status = (*func)(e, private);
	_avl_walk_fini(&my_walk);

	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
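/*
 * Example usage (illustrative; my_print_cb and "avl" are hypothetical,
 * reusing the struct my_obj sketched earlier):
 *
 *	static int
 *	my_print_cb(void *elem, void *arg)
 *	{
 *		struct my_obj *obj = elem;
 *		(void) printf("%d\n", obj->mo_key);
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	(void) uu_avl_walk(avl, my_print_cb, NULL, 0);
 *
 * Pass UU_WALK_ROBUST if the callback may remove elements: robust
 * walkers are advanced off an element when it is removed, so removal
 * during the walk is safe.
 */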
void
uu_avl_remove(uu_avl_t *ap, void *elem)
{
	uu_avl_walk_t *wp;
	uu_avl_pool_t *pp = ap->ua_pool;
	uintptr_t *na = NODE_ARRAY(pp, elem);

	if (ap->ua_debug) {
		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}

	/*
	 * Robust walkers must be advanced if we are removing the node
	 * they are currently using.  In debug mode, non-robust walkers
	 * are also on the walker list.
	 */
	for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
	    wp = wp->uaw_next) {
		if (wp->uaw_robust) {
			if (elem == wp->uaw_next_result)
				(void) _avl_walk_advance(wp, ap);
		} else if (wp->uaw_next_result != NULL) {
			uu_panic("uu_avl_remove(%p, %p): active non-robust "
			    "walker\n", (void *)ap, elem);
		}
	}

	avl_remove(&ap->ua_tree, elem);

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}
void *
uu_avl_teardown(uu_avl_t *ap, void **cookie)
{
	void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);

	if (elem != NULL) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		na[0] = POOL_TO_MARKER(pp);
		na[1] = 0;
	}
	return (elem);
}
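/*
 * Example usage (illustrative): the classic empty-and-free loop.
 * Unlike repeated uu_avl_remove() calls, teardown does not rebalance
 * the tree, so it is only safe when the avl is being emptied completely
 * and no other operations intervene.
 *
 *	void *cookie = NULL;
 *	struct my_obj *obj;
 *
 *	while ((obj = uu_avl_teardown(avl, &cookie)) != NULL)
 *		free(obj);
 *	uu_avl_destroy(avl);
 */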
void *
uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
{
	struct uu_avl_node_compare_info info;
	void *result;

	info.ac_compare = ap->ua_pool->uap_cmp;
	info.ac_private = private;
	info.ac_right = elem;
	info.ac_found = NULL;

	result = avl_find(&ap->ua_tree, &info, out);
	if (out != NULL)
		*out = INDEX_ENCODE(ap, *out);

	if (ap->ua_debug && result != NULL)
		uu_panic("uu_avl_find: internal error: avl_find succeeded\n");

	return (info.ac_found);
}
void
uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
{
	if (ap->ua_debug) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		if (na[1] != 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node already "
			    "in tree, or corrupt\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] == 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node not "
			    "initialized\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] != POOL_TO_MARKER(pp))
			uu_panic("uu_avl_insert(%p, %p, %p): node from "
			    "other pool, or corrupt\n",
			    (void *)ap, elem, (void *)idx);

		if (!INDEX_VALID(ap, idx))
			uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
			    (void *)ap, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");

		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}
	avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
}
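/*
 * Example usage (illustrative): the find-or-insert idiom.  The index
 * filled in by uu_avl_find() is only good until the next insert or
 * remove on this avl; in debug mode a stale index panics.
 *
 *	uu_avl_index_t where;
 *
 *	if (uu_avl_find(avl, obj, NULL, &where) == NULL)
 *		uu_avl_insert(avl, obj, where);
 *	else
 *		...  duplicate key: an equal element is already present ...
 */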
void *
uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
}

void *
uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
}
/*
 * Called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_avl_lockup(void)
{
	uu_avl_pool_t *pp;

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_lock(&pp->uap_lock);
}

void
uu_avl_release(void)
{
	uu_avl_pool_t *pp;

	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_unlock(&pp->uap_lock);
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
}