/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/avl.h>
static uu_avl_pool_t	uu_null_apool = { &uu_null_apool, &uu_null_apool };
static pthread_mutex_t	uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * The index mark changes on every insert and delete, to catch stale
 * references.
 *
 * We leave the low bit alone, since the avl code uses it.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 2)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)

#define	INDEX_DECODE(i)		((i) & ~INDEX_MAX)
#define	INDEX_ENCODE(p, n)	(((n) & ~INDEX_MAX) | (p)->ua_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ua_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)
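
/*
 * Worked example (illustrative; assumes an LP64 target, where
 * sizeof (uintptr_t) == 8, so INDEX_MAX == 6, binary 110):
 *
 * INDEX_NEXT() cycles a tree's mark 2 -> 4 -> 6 -> 2.  An index returned
 * by uu_avl_find() carries the tree's current mark in bits 1-2, stamped
 * in by INDEX_ENCODE().  Any insert or remove advances the mark, so
 * INDEX_VALID() rejects indexes computed before the modification.  Bit 0
 * is preserved throughout, since the avl code uses it.
 */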
/*
 * When an element is inactive (not in a tree), we keep a marked pointer to
 * its containing pool in its first word, and a NULL pointer in its second.
 *
 * On insert, we use these to verify that it comes from the correct pool.
 */
#define	NODE_ARRAY(p, n)	((uintptr_t *)((uintptr_t)(n) + \
				    (p)->uap_nodeoffset))

#define	POOL_TO_MARKER(pp)	(((uintptr_t)(pp) | 1))

#define	DEAD_MARKER		0xc4
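
/*
 * Node-word lifecycle, for reference (a summary of the conventions above,
 * not additional state):
 *
 *	na[0]			na[1]		meaning
 *	POOL_TO_MARKER(pool)	0		initialized, not in any tree
 *	(avl-internal)		(avl-internal)	linked into a tree
 *	DEAD_MARKER		DEAD_MARKER	finalized by uu_avl_node_fini()
 *
 * POOL_TO_MARKER() sets the low bit, so the marker can never be mistaken
 * for the (aligned) pool pointer itself.
 */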
uu_avl_pool_t *
uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
    uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_avl_pool_t *pp, *next, *prev;
	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_avl_node_t) > objsize ||
	    compare_func == NULL) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}
	if (flags & ~UU_AVL_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}
	pp = uu_zalloc(sizeof (uu_avl_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}
	(void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
	pp->uap_nodeoffset = nodeoffset;
	pp->uap_objsize = objsize;
	pp->uap_cmp = compare_func;
	if (flags & UU_AVL_POOL_DEBUG)
		pp->uap_debug = 1;
	pp->uap_last_index = 0;
	(void) pthread_mutex_init(&pp->uap_lock, NULL);

	pp->uap_null_avl.ua_next_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
	pp->uap_null_avl.ua_prev_enc = UU_PTR_ENCODE(&pp->uap_null_avl);
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next = next = &uu_null_apool;
	pp->uap_prev = prev = next->uap_prev;
	next->uap_prev = pp;
	prev->uap_next = pp;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);

	return (pp);
}
void
uu_avl_pool_destroy(uu_avl_pool_t *pp)
{
	if (pp->uap_debug) {
		if (pp->uap_null_avl.ua_next_enc !=
		    UU_PTR_ENCODE(&pp->uap_null_avl) ||
		    pp->uap_null_avl.ua_prev_enc !=
		    UU_PTR_ENCODE(&pp->uap_null_avl)) {
			uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding avls, or is corrupt.\n",
			    (int)sizeof (pp->uap_name), pp->uap_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_apool_list_lock);
	pp->uap_next->uap_prev = pp->uap_prev;
	pp->uap_prev->uap_next = pp->uap_next;
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
	pp->uap_prev = NULL;
	pp->uap_next = NULL;
	uu_free(pp);
}
void
uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->uap_objsize) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_objsize);
		}
		if (offset != pp->uap_nodeoffset) {
			uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->uap_name,
			    (long)offset, (long)pp->uap_nodeoffset);
		}
	}

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}
void
uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
{
	uintptr_t *na = (uintptr_t *)np;

	if (pp->uap_debug) {
		if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node already finied\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
		if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
			uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt, in tree, or in different pool\n",
			    base, (void *)np, (void *)pp, pp->uap_name);
		}
	}

	na[0] = DEAD_MARKER;
	na[1] = DEAD_MARKER;
	na[2] = DEAD_MARKER;
}
struct uu_avl_node_compare_info {
	uu_compare_fn_t	*ac_compare;
	void		*ac_private;
	void		*ac_right;
	void		*ac_found;
};
static int
uu_avl_node_compare(const void *l, const void *r)
{
	struct uu_avl_node_compare_info *info =
	    (struct uu_avl_node_compare_info *)l;

	int res = info->ac_compare(r, info->ac_right, info->ac_private);

	if (res == 0) {
		if (info->ac_found == NULL)
			info->ac_found = (void *)r;
		return (-1);
	}
	if (res < 0)
		return (1);
	return (-1);
}
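
/*
 * How the trampoline above is driven (a sketch of the convention, not
 * additional API): uu_avl_find() passes a uu_avl_node_compare_info as the
 * left-hand "element" of avl_find(), so each comparison is really
 * info->ac_compare(tree_elem, search_elem, private).  Returning -1 on a
 * match steers the search left past equal elements; the first match seen
 * is remembered in ac_found, and avl_find() itself is expected to fail
 * (uu_avl_find() asserts as much in debug mode).
 */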
uu_avl_t *
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
{
	uu_avl_t *ap, *next, *prev;
	if (flags & ~UU_AVL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}
	ap = uu_zalloc(sizeof (*ap));
	if (ap == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}
	ap->ua_pool = pp;
	ap->ua_parent_enc = UU_PTR_ENCODE(parent);
	ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
	ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));

	avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
	    pp->uap_nodeoffset);
	ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
	ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;
	(void) pthread_mutex_lock(&pp->uap_lock);
	next = &pp->uap_null_avl;
	prev = UU_PTR_DECODE(next->ua_prev_enc);
	ap->ua_next_enc = UU_PTR_ENCODE(next);
	ap->ua_prev_enc = UU_PTR_ENCODE(prev);
	next->ua_prev_enc = UU_PTR_ENCODE(ap);
	prev->ua_next_enc = UU_PTR_ENCODE(ap);
	(void) pthread_mutex_unlock(&pp->uap_lock);

	return (ap);
}
void
uu_avl_destroy(uu_avl_t *ap)
{
	uu_avl_pool_t *pp = ap->ua_pool;

	if (ap->ua_debug) {
		if (avl_numnodes(&ap->ua_tree) != 0) {
			uu_panic("uu_avl_destroy(%p): tree not empty\n",
			    (void *)ap);
		}
		if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
		    ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
			uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
			    (void *)ap);
		}
	}
	(void) pthread_mutex_lock(&pp->uap_lock);
	UU_AVL_PTR(ap->ua_next_enc)->ua_prev_enc = ap->ua_prev_enc;
	UU_AVL_PTR(ap->ua_prev_enc)->ua_next_enc = ap->ua_next_enc;
	(void) pthread_mutex_unlock(&pp->uap_lock);
	ap->ua_prev_enc = UU_PTR_ENCODE(NULL);
	ap->ua_next_enc = UU_PTR_ENCODE(NULL);

	ap->ua_pool = NULL;
	avl_destroy(&ap->ua_tree);

	uu_free(ap);
}
size_t
uu_avl_numnodes(uu_avl_t *ap)
{
	return (avl_numnodes(&ap->ua_tree));
}

void *
uu_avl_first(uu_avl_t *ap)
{
	return (avl_first(&ap->ua_tree));
}

void *
uu_avl_last(uu_avl_t *ap)
{
	return (avl_last(&ap->ua_tree));
}

void *
uu_avl_next(uu_avl_t *ap, void *node)
{
	return (AVL_NEXT(&ap->ua_tree, node));
}

void *
uu_avl_prev(uu_avl_t *ap, void *node)
{
	return (AVL_PREV(&ap->ua_tree, node));
}
static void
_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));

	wp->uaw_avl = ap;
	wp->uaw_robust = robust;
	wp->uaw_dir = direction;

	if (direction > 0)
		wp->uaw_next_result = avl_first(&ap->ua_tree);
	else
		wp->uaw_next_result = avl_last(&ap->ua_tree);

	if (ap->ua_debug || robust) {
		wp->uaw_next = next = &ap->ua_null_walk;
		wp->uaw_prev = prev = next->uaw_prev;
		next->uaw_prev = wp;
		prev->uaw_next = wp;
	}
}
static void *
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
{
	void *np = wp->uaw_next_result;

	avl_tree_t *t = &ap->ua_tree;

	if (np == NULL)
		return (NULL);

	wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
	    AVL_PREV(t, np);

	return (np);
}
static void
_avl_walk_fini(uu_avl_walk_t *wp)
{
	if (wp->uaw_next != NULL) {
		wp->uaw_next->uaw_prev = wp->uaw_prev;
		wp->uaw_prev->uaw_next = wp->uaw_next;
		wp->uaw_next = NULL;
		wp->uaw_prev = NULL;
	}
	wp->uaw_avl = NULL;
	wp->uaw_next_result = NULL;
}
uu_avl_walk_t *
uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
{
	uu_avl_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	_avl_walk_init(wp, ap, flags);
	return (wp);
}
void *
uu_avl_walk_next(uu_avl_walk_t *wp)
{
	return (_avl_walk_advance(wp, wp->uaw_avl));
}

void
uu_avl_walk_end(uu_avl_walk_t *wp)
{
	_avl_walk_fini(wp);
	uu_free(wp);
}
int
uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	void *e;
	uu_avl_walk_t my_walk;

	int status = UU_WALK_NEXT;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	_avl_walk_init(&my_walk, ap, flags);
	while (status == UU_WALK_NEXT &&
	    (e = _avl_walk_advance(&my_walk, ap)) != NULL)
		status = (*func)(e, private);
	_avl_walk_fini(&my_walk);

	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}
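
/*
 * Example walk (illustrative; "myobj_t" and "print_cb" are hypothetical):
 *
 *	static int
 *	print_cb(void *elem, void *arg)
 *	{
 *		myobj_t *obj = elem;
 *		(void) printf("%d\n", obj->mo_key);
 *		return (UU_WALK_NEXT);
 *	}
 *
 *	if (uu_avl_walk(avl, print_cb, NULL, 0) == -1)
 *		... the callback returned UU_WALK_ERROR ...
 *
 * A callback may return UU_WALK_DONE to stop early; that still counts as
 * success.  Only a negative return makes uu_avl_walk() itself fail.
 */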
void
uu_avl_remove(uu_avl_t *ap, void *elem)
{
	uu_avl_walk_t *wp;
	uu_avl_pool_t *pp = ap->ua_pool;
	uintptr_t *na = NODE_ARRAY(pp, elem);

	if (ap->ua_debug) {
		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}
	/*
	 * Robust walkers must be advanced if we are removing the node
	 * they are currently using.  In debug mode, non-robust walkers
	 * are also on the walker list.
	 */
	for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
	    wp = wp->uaw_next) {
		if (wp->uaw_robust) {
			if (elem == wp->uaw_next_result)
				(void) _avl_walk_advance(wp, ap);
		} else if (wp->uaw_next_result != NULL) {
			uu_panic("uu_avl_remove(%p, %p): active non-robust "
			    "walker\n", (void *)ap, elem);
		}
	}

	avl_remove(&ap->ua_tree, elem);

	na[0] = POOL_TO_MARKER(pp);
	na[1] = 0;
}
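
/*
 * The robust-walker accommodation above is what makes the common
 * delete-while-walking idiom safe (a sketch; "avl" is a caller's tree):
 *
 *	uu_avl_walk_t *wp = uu_avl_walk_start(avl, UU_WALK_ROBUST);
 *	void *elem;
 *
 *	while ((elem = uu_avl_walk_next(wp)) != NULL)
 *		uu_avl_remove(avl, elem);
 *	uu_avl_walk_end(wp);
 *
 * Removing a robust walker's pending node just advances that walker; an
 * active non-robust walker in the same situation panics in debug mode
 * rather than silently following a stale pointer.
 */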
void *
uu_avl_teardown(uu_avl_t *ap, void **cookie)
{
	void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);

	if (elem != NULL) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		na[0] = POOL_TO_MARKER(pp);
		na[1] = 0;
	}
	return (elem);
}
void *
uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
{
	struct uu_avl_node_compare_info info;
	void *result;

	info.ac_compare = ap->ua_pool->uap_cmp;
	info.ac_private = private;
	info.ac_right = elem;
	info.ac_found = NULL;

	result = avl_find(&ap->ua_tree, &info, out);
	if (out != NULL)
		*out = INDEX_ENCODE(ap, *out);

	if (ap->ua_debug && result != NULL)
		uu_panic("uu_avl_find: internal error: avl_find succeeded\n");

	return (info.ac_found);
}
void
uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
{
	if (ap->ua_debug) {
		uu_avl_pool_t *pp = ap->ua_pool;
		uintptr_t *na = NODE_ARRAY(pp, elem);

		if (na[1] != 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node already "
			    "in tree, or corrupt\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] == 0)
			uu_panic("uu_avl_insert(%p, %p, %p): node not "
			    "initialized\n",
			    (void *)ap, elem, (void *)idx);
		if (na[0] != POOL_TO_MARKER(pp))
			uu_panic("uu_avl_insert(%p, %p, %p): node from "
			    "other pool, or corrupt\n",
			    (void *)ap, elem, (void *)idx);

		if (!INDEX_VALID(ap, idx))
			uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
			    (void *)ap, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");

		/*
		 * invalidate outstanding uu_avl_index_ts.
		 */
		ap->ua_index = INDEX_NEXT(ap->ua_index);
	}
	avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
}
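
/*
 * Typical find-or-insert idiom built on the two functions above (a
 * sketch; "obj" is a caller's element with an initialized node):
 *
 *	uu_avl_index_t idx;
 *
 *	if (uu_avl_find(avl, obj, NULL, &idx) == NULL)
 *		uu_avl_insert(avl, obj, idx);
 *
 * The index is only good until the next insert or remove on the tree;
 * after that INDEX_VALID() fails, and debug mode panics with
 * "outdated index".
 */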
void *
uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
}
void *
uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
{
	if (ap->ua_debug && !INDEX_VALID(ap, idx))
		uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
		    (void *)ap, (void *)idx, INDEX_CHECK(idx)?
		    "outdated index" : "invalid index");
	return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
}
/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_avl_lockup(void)
{
	uu_avl_pool_t *pp;

	(void) pthread_mutex_lock(&uu_apool_list_lock);
	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_lock(&pp->uap_lock);
}
void
uu_avl_release(void)
{
	uu_avl_pool_t *pp;

	for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
	    pp = pp->uap_next)
		(void) pthread_mutex_unlock(&pp->uap_lock);
	(void) pthread_mutex_unlock(&uu_apool_list_lock);
}
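
/*
 * The ordering here is the fork1()-safety story: uu_avl_lockup() acquires
 * uu_apool_list_lock and then every pool lock, so no pool mutex can be
 * mid-operation when the child's address space is snapshotted, and
 * uu_avl_release() drops them in the same walk order.  This pairing is
 * presumably registered through libuutil's uu_lockup()/uu_release()
 * pthread_atfork() handlers; those details live elsewhere in libuutil,
 * not in this file.
 */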