/*
 * Copyright 2006, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#include <KernelExport.h>
static int rn_walktree_from(struct radix_node_head *h, void *a, void *m,
	walktree_f_t *f, void *w);
static int rn_walktree(struct radix_node_head *, walktree_f_t *, void *);
static struct radix_node *rn_insert(void *, struct radix_node_head *, int *,
	struct radix_node [2]);
static struct radix_node *rn_newpair(void *, int, struct radix_node [2]);
static struct radix_node *rn_search(void *, struct radix_node *);
static struct radix_node *rn_search_m(void *, struct radix_node *, void *);

static int max_keylen;
static struct radix_mask *rn_mkfreelist;
static struct radix_node_head *mask_rnhead;
/*
 * Work area -- the following point to 3 buffers of size max_keylen,
 * allocated in this order in a block of memory malloc'ed by rn_init.
 */
static uint8 *rn_zeros, *rn_ones, *addmask_key;
#define MKFree(m) { (m)->rm_mklist = rn_mkfreelist; rn_mkfreelist = (m);}

#define rn_masktop (mask_rnhead->rnh_treetop)
static int rn_lexobetter(void *m_arg, void *n_arg);
static struct radix_mask *rn_new_radix_mask(struct radix_node *tt,
	struct radix_mask *next);
static int rn_satisfies_leaf(char *trial, struct radix_node *leaf,
	int skip);
/*
 * The data structure for the keys is a radix tree with one way
 * branching removed. The index rn_bit at an internal node n represents a bit
 * position to be tested. The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_bit - 1.
 * (We say the index of n is rn_bit.)
 *
 * There is at least one descendant which has a one bit at position rn_bit,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask. We require that the
 * bit-wise logical and of the key and mask be the key.
 * We define the index of a route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit is 0, past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_bit,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If the index(m) < rn_bit, this implies the last few bits of k
 * before bit b are all 0 (and hence true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */
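/*
 * A concrete illustration (not part of the original BSD text): for an IPv4
 * /24 route the mask bytes are ff ff ff 00, so the first 0 bit -- counting
 * bit 0 as the highest order bit -- is bit 24, and the route's index is 24,
 * smaller (more specific) than the index 16 of a /16 route; the offset of
 * the address bytes inside the sockaddr shifts both by the same amount.
 * The /24 mask is "normal" because no 1 bit follows that first 0 bit; a
 * mask such as ff ff 00 ff is non-contiguous and therefore not normal.
 */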
/*
 * Most of the functions in this code assume that the key/mask arguments
 * are sockaddr-like structures, where the first byte is a u_char
 * indicating the size of the entire structure.
 *
 * To make the assumption more explicit, we use the LEN() macro to access
 * this field. It is safe to pass an expression with side effects
 * to LEN() as the argument is evaluated only once.
 */
#define LEN(x) (*(const u_char *)(x))
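/*
 * Illustrative use only (not in the original file): for a key that is
 * really a BSD-style struct sockaddr_in, whose first byte is sin_len,
 * LEN() simply reads that length byte:
 *
 *	struct sockaddr_in sin;
 *	...
 *	int keyLength = LEN(&sin);	// same value as sin.sin_len
 */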
/*
 * XXX THIS NEEDS TO BE FIXED
 * In the code, pointers to keys and masks are passed as either
 * 'void *' (because callers tend to pass pointers of various kinds), or
 * 'caddr_t' (which is fine for pointer arithmetic, but not very
 * clean when you dereference it to access data). Furthermore, caddr_t
 * is really 'char *', while the natural type to operate on keys and
 * masks would be 'u_char'. This mismatch requires a lot of casts and
 * intermediate variables that clutter the code.
 */
static int /* XXX: arbitrary ordering for non-contiguous masks */
rn_lexobetter(void *m_arg, void *n_arg)
{
	register uint8 *mp = m_arg, *np = n_arg, *lim;

	if (LEN(mp) > LEN(np))
		return 1; /* not really, but need to check longer one first */
	if (LEN(mp) == LEN(np)) {
		for (lim = mp + LEN(mp); mp < lim;) {
			if (*mp++ > *np++)
				return 1;
		}
	}
	return 0;
}
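/*
 * For illustration: two non-contiguous masks of equal length, say one whose
 * address bytes are ff ff 00 ff and another with ff 00 ff ff, are ordered
 * purely by this byte-wise comparison -- the first wins because the first
 * byte that differs (ff vs 00) is larger. Neither refines the other, so
 * rn_refines() cannot order them.
 */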
static struct radix_mask *
rn_new_radix_mask(register struct radix_node *tt, register struct radix_mask *next)
{
	register struct radix_mask *m;

	if (rn_mkfreelist != NULL) {
		m = rn_mkfreelist;
		rn_mkfreelist = m->rm_mklist;
	} else
		m = (struct radix_mask *)malloc(sizeof(struct radix_mask));
	if (m == NULL) {
		dprintf("Mask for route not entered\n");
		return NULL;
	}

	memset(m, 0, sizeof *m);
	m->rm_bit = tt->rn_bit;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return m;
}
/*
	Search a node in the tree matching the key.
*/
static struct radix_node *
rn_search(void *v_arg, struct radix_node *head)
{
	register struct radix_node *x;
	register caddr_t v;

	for (x = head, v = v_arg; x->rn_bit >= 0;) {
		if (x->rn_bmask & v[x->rn_offset])
			x = x->rn_right;
		else
			x = x->rn_left;
	}
	return x;
}
/*
	Same as above, but with an additional mask.
	XXX note this function is used only once.
*/
static struct radix_node *
rn_search_m(void *v_arg, struct radix_node *head, void *m_arg)
{
	register struct radix_node *x;
	register caddr_t v = v_arg, m = m_arg;

	for (x = head; x->rn_bit >= 0;) {
		if ((x->rn_bmask & m[x->rn_offset])
			&& (x->rn_bmask & v[x->rn_offset]))
			x = x->rn_right;
		else
			x = x->rn_left;
	}
	return x;
}
static int
rn_satisfies_leaf(char *trial, register struct radix_node *leaf, int skip)
{
	register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
	char *cplim;
	int length = min(LEN(cp), LEN(cp2));

	if (cp3 == NULL)
		cp3 = (char *)rn_ones;
	else
		length = min(length, *(u_char *)cp3);
	cplim = cp + length; cp3 += skip; cp2 += skip;
	for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
		if ((*cp ^ *cp2) & *cp3)
			return 0;
	return 1;
}
/*
 * Whenever we add a new leaf to the tree, we also add a parent node,
 * so we allocate them as an array of two elements: the first one must be
 * the leaf (see RNTORT() in route.c), the second one is the parent.
 * This routine initializes the relevant fields of the nodes, so that
 * the leaf is the left child of the parent node, and both nodes have
 * (almost) all fields filled as appropriate.
 * (XXX some fields are left unset, see the '#if 0' section).
 * The function returns a pointer to the parent node.
 */
static struct radix_node *
rn_newpair(void *v, int b, struct radix_node nodes[2])
{
	register struct radix_node *tt = nodes, *t = tt + 1;

	t->rn_bit = b;
	t->rn_bmask = 0x80 >> (b & 7);
	t->rn_left = tt;
	t->rn_offset = b >> 3;

#if 0 /* XXX perhaps we should fill these fields as well. */
	t->rn_parent = t->rn_right = NULL;

	tt->rn_mask = NULL;
	tt->rn_dupedkey = NULL;
	tt->rn_bmask = 0;
#endif
	tt->rn_bit = -1;
	tt->rn_key = (caddr_t)v;
	tt->rn_parent = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
	tt->rn_mklist = t->rn_mklist = 0;
	return t;
}
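/*
 * For illustration (the variable names are hypothetical, not from this
 * file): a caller typically embeds the two nodes in its own route
 * structure and hands them to rn_newpair():
 *
 *	struct radix_node nodes[2];
 *	struct radix_node *parent = rn_newpair(key, bit, nodes);
 *	// nodes[0] is the leaf that carries 'key';
 *	// nodes[1] == parent is the internal node that tests bit 'bit'.
 */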
static struct radix_node *
rn_insert(void *v_arg, struct radix_node_head *head, int *dupentry,
	struct radix_node nodes[2])
{
	uint8 *v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	int head_off = top->rn_offset, vlen = (int)LEN(v);
	register struct radix_node *t = rn_search(v_arg, top);
	register uint8 *cp = v + head_off;
	register int b;
	struct radix_node *tt;

	/*
	 * Find first bit at which v and t->rn_key differ
	 */
	{
		register uint8 *cp2 = t->rn_key + head_off;
		register int cmp_res;
		uint8 *cplim = v + vlen;

		while (cp < cplim)
			if (*cp2++ != *cp++)
				goto on1;
		*dupentry = 1;
		return t;

	on1:
		*dupentry = 0;
		cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
		for (b = (cp - v) << 3; cmp_res; b--)
			cmp_res >>= 1;
	}
	{
		register struct radix_node *p, *x = top;
		cp = v;
		do {
			p = x;
			if (cp[x->rn_offset] & x->rn_bmask)
				x = x->rn_right;
			else
				x = x->rn_left;
		} while (b > (unsigned) x->rn_bit);
		/* x->rn_bit < b && x->rn_bit >= 0 */
		t = rn_newpair(v_arg, b, nodes);
		tt = t->rn_left;
		if ((cp[p->rn_offset] & p->rn_bmask) == 0)
			p->rn_left = t;
		else
			p->rn_right = t;
		x->rn_parent = t;
		t->rn_parent = p; /* frees x, p as temp vars below */
		if ((cp[t->rn_offset] & t->rn_bmask) == 0) {
			t->rn_right = x;
		} else {
			t->rn_right = tt;
			t->rn_left = x;
		}
	}
	return tt;
}
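/*
 * For illustration: if the tree already holds the key 10.0.0.0 and
 * 10.0.64.0 is inserted, the two keys first differ at bit 17 of the
 * address (b additionally counts the sockaddr header bytes that precede
 * the address), so rn_insert() creates an internal node testing that bit
 * and hangs the existing subtree and the new leaf off it. rn_newpair()
 * makes the new leaf the left child, and it is swapped to the right when
 * the new key has a 1 at the tested bit, as 10.0.64.0 does.
 */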
/*
	This is the same as rn_walktree() except for the parameters and the
	starting point of the walk: only the subtree selected by the given
	address and mask is visited.
*/
static int
rn_walktree_from(struct radix_node_head *h, void *a, void *m, walktree_f_t *f, void *w)
{
	int error;
	struct radix_node *base, *next;
	u_char *xa = (u_char *)a;
	u_char *xm = (u_char *)m;
	register struct radix_node *rn, *last = 0 /* shut up gcc */;
	int stopping = 0;
	int lastb;
	/*
	 * rn_search_m is sort-of-open-coded here. We cannot use the
	 * function because we need to keep track of the last node seen.
	 */
	/* printf("about to search\n"); */
	for (rn = h->rnh_treetop; rn->rn_bit >= 0; ) {
		last = rn;
		/* printf("rn_bit %d, rn_bmask %x, xm[rn_offset] %x\n",
			rn->rn_bit, rn->rn_bmask, xm[rn->rn_offset]); */
		if (!(rn->rn_bmask & xm[rn->rn_offset])) {
			break;
		}
		if (rn->rn_bmask & xa[rn->rn_offset]) {
			rn = rn->rn_right;
		} else {
			rn = rn->rn_left;
		}
	}
	/* printf("done searching\n"); */

	/*
	 * Two cases: either we stepped off the end of our mask,
	 * in which case last == rn, or we reached a leaf, in which
	 * case we want to start from the last node we looked at.
	 * Either way, last is the node we want to start from.
	 */
	rn = last;
	lastb = rn->rn_bit;
	/* printf("rn %p, lastb %d\n", rn, lastb);*/

	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	while (rn->rn_bit >= 0) {
		rn = rn->rn_left;
	}

	while (!stopping) {
		/* printf("node %p (%d)\n", rn, rn->rn_bit); */
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_parent->rn_right == rn
				&& !(rn->rn_flags & RNF_ROOT)) {
			rn = rn->rn_parent;

			/* if went up beyond last, stop */
			if (rn->rn_bit <= lastb) {
				stopping = 1;
				/* printf("up too far\n"); */
				/*
				 * XXX we should jump to the 'Process leaves'
				 * part, because the values of 'rn' and 'next'
				 * we compute will not be used. Not a big deal
				 * because this loop will terminate, but it is
				 * inefficient and hard to understand!
				 */
			}
		}

		/*
		 * At the top of the tree, no need to traverse the right
		 * half, prevent the traversal of the entire tree in the
		 * case of default route.
		 */
		if (rn->rn_parent->rn_flags & RNF_ROOT)
			stopping = 1;

		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;)
			rn = rn->rn_left;
		next = rn;

		/* Process leaves */
		while ((rn = base) != 0) {
			base = rn->rn_dupedkey;
			/* printf("leaf %p\n", rn); */
			if (!(rn->rn_flags & RNF_ROOT)
					&& (error = (*f)(rn, w)))
				return error;
		}
		rn = next;

		if (rn->rn_flags & RNF_ROOT) {
			/* printf("root, stopping"); */
			stopping = 1;
		}
	}
	return 0;
}
static int
rn_walktree(struct radix_node_head *h, walktree_f_t *f, void *w)
{
	int error;
	struct radix_node *base, *next;
	register struct radix_node *rn = h->rnh_treetop;

	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_bit >= 0) {
		rn = rn->rn_left;
	}
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_parent->rn_right == rn
				&& (rn->rn_flags & RNF_ROOT) == 0) {
			rn = rn->rn_parent;
		}
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;) {
			rn = rn->rn_left;
		}
		next = rn;
		/* Process leaves */
		while ((rn = base)) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT)
					&& (error = (*f)(rn, w)))
				return error;
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return 0;
	}
	/* NOTREACHED */
}
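/*
 * Usage sketch, for illustration only (the callback name is hypothetical):
 * a walker has the walktree_f_t signature, is handed every leaf together
 * with the opaque 'w' argument, and aborts the walk by returning non-zero.
 *
 *	static int
 *	count_route(struct radix_node *rn, void *w)
 *	{
 *		(*(int *)w)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	rn_walktree(head, count_route, &count);
 */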
// #pragma mark - public API
struct radix_node *
rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head)
{
	register struct radix_node *x;
	uint8 *netmask = NULL;

	if (m_arg) {
		x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_offset);
		if (x == NULL)
			return NULL;
		netmask = (uint8 *)x->rn_key;
	}

	x = rn_match(v_arg, head);
	if (netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	return x;
}
struct radix_node *
rn_match(void *v_arg, struct radix_node_head *head)
{
	caddr_t v = v_arg;
	register struct radix_node *t = head->rnh_treetop, *x;
	register caddr_t cp = v, cp2;
	caddr_t cplim;
	struct radix_node *saved_t, *top = t;
	int off = t->rn_offset, vlen = LEN(cp), matched_off;
	register int test, b, rn_bit;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_bit >= 0; ) {
		if (t->rn_bmask & cp[t->rn_offset])
			t = t->rn_right;
		else
			t = t->rn_left;
	}

	/*
	 * See if we match exactly as a host destination
	 * or at least learn how many bits match, for normal mask finesse.
	 *
	 * It doesn't hurt us to limit how many bytes to check
	 * to the length of the mask, since if it matches we had a genuine
	 * match and the leaf we have is the most specific one anyway;
	 * if it didn't match with a shorter length it would fail
	 * with a long one. This wins big for class B&C netmasks which
	 * are probably the most common case...
	 */
	if (t->rn_mask)
		vlen = *(u_char *)t->rn_mask;
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++) {
		if (*cp != *cp2)
			goto on1;
	}

	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default. Ugh!
	 *
	 * Never return the root node itself, it seems to cause a
	 * lot of confusion.
	 */
	if (t->rn_flags & RNF_ROOT)
		t = t->rn_dupedkey;
	return t;

on1:
	test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
	for (b = 7; (test >>= 1) > 0;)
		b--;
	matched_off = cp - v;
	b += matched_off << 3;
	rn_bit = -1 - b;

	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	if ((saved_t = t)->rn_mask == 0)
		t = t->rn_dupedkey;
	for (; t; t = t->rn_dupedkey) {
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if (rn_bit <= t->rn_bit)
				return t;
		} else if (rn_satisfies_leaf(v, t, matched_off))
			return t;
	}
	t = saved_t;

	/* start searching up the tree */
	do {
		register struct radix_mask *m;

		t = t->rn_parent;
		m = t->rn_mklist;
		/*
		 * If non-contiguous masks ever become important
		 * we can restore the masking and open coding of
		 * the search and satisfaction test and put the
		 * calculation of "off" back before the "do".
		 */
		while (m) {
			if (m->rm_flags & RNF_NORMAL) {
				if (rn_bit <= m->rm_bit)
					return m->rm_leaf;
			} else {
				off = min(t->rn_offset, matched_off);
				x = rn_search_m(v, t, m->rm_mask);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x && rn_satisfies_leaf(v, x, off))
					return x;
			}
			m = m->rm_mklist;
		}
	} while (t != top);

	return NULL;
}
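/*
 * Illustrative call (not from this file): outside callers normally go
 * through the function pointers installed by rn_inithead(), e.g.
 *
 *	struct radix_node *rn = head->rnh_matchaddr(&destination, head);
 *	// rn is NULL if nothing matched; otherwise it is the leaf of the
 *	// best (most specific) matching route.
 */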
struct radix_node *
rn_addmask(void *n_arg, int search, int skip)
{
	uint8 *netmask = (uint8 *)n_arg;
	register struct radix_node *x;
	register uint8 *cp, *cplim;
	register int b = 0, mlen, j;
	int maskduplicated, m0, isnormal;
	struct radix_node *saved_x;
	static int last_zeroed = 0;

	if ((mlen = LEN(netmask)) > max_keylen)
		mlen = max_keylen;
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return mask_rnhead->rnh_nodes;
	if (skip > 1)
		memcpy(addmask_key + 1, rn_ones + 1, skip - 1);
	if ((m0 = mlen) > skip)
		memcpy(addmask_key + skip, netmask + skip, mlen - skip);

	/*
	 * Trim trailing zeroes.
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= last_zeroed)
			last_zeroed = mlen;
		return (mask_rnhead->rnh_nodes);
	}
	if (m0 < last_zeroed)
		memset(addmask_key + m0, 0, last_zeroed - m0);
	*addmask_key = last_zeroed = mlen;
	x = rn_search(addmask_key, rn_masktop);
	if (memcmp(addmask_key, x->rn_key, mlen) != 0)
		x = NULL;
	if (x || search)
		return x;

	x = (struct radix_node *)calloc(1, max_keylen + 2 * sizeof(*x));
	if ((saved_x = x) == 0)
		return NULL;
	netmask = cp = (caddr_t)(x + 2);
	memcpy(cp, addmask_key, mlen);
	x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
	if (maskduplicated) {
		dprintf("rn_addmask: mask impossibly already in tree\n");
		free(saved_x);
		return x;
	}

	/*
	 * Calculate index of mask, and check for normalcy.
	 * First find the first byte with a 0 bit, then if there are
	 * more bits left (remember we already trimmed the trailing 0's),
	 * the pattern must be one of those in normal_chars[], or we have
	 * a non-contiguous mask.
	 */
	cplim = netmask + mlen;
	isnormal = 1;
	for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) {
		cp++;
	}
	if (cp != cplim) {
		static char normal_chars[] = {
			0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff};

		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != normal_chars[b] || cp != (cplim - 1))
			isnormal = 0;
	}
	b += (cp - netmask) << 3;
	x->rn_bit = -1 - b;
	if (isnormal)
		x->rn_flags |= RNF_NORMAL;
	return x;
}
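/*
 * For illustration (variable names are hypothetical): masks are kept in
 * their own little tree, mask_rnhead, so routes that use the same netmask
 * end up sharing one canonical mask pointer:
 *
 *	struct radix_node *mx
 *		= rn_addmask(&mask, 0, head->rnh_treetop->rn_offset);
 *	// mx->rn_key is the canonical copy of the mask; RNF_NORMAL is set
 *	// on it when the mask is contiguous.
 */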
struct radix_node *
rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
	struct radix_node treenodes[2])
{
	uint8 *v = (uint8 *)v_arg, *netmask = (uint8 *)n_arg;
	register struct radix_node *t, *x = 0, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf = 0;
	int keyduplicated;
	caddr_t mmask;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 0, top->rn_offset)) == 0)
			return NULL;
		b_leaf = x->rn_bit;
		b = -1 - x->rn_bit;
		netmask = (uint8 *)x->rn_key;
	}

	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
			if (tt->rn_mask == netmask)
				return NULL;
			if (netmask == 0
				|| (tt->rn_mask
					&& ((b_leaf < tt->rn_bit) /* index(netmask) > node */
					|| rn_refines(netmask, tt->rn_mask)
					|| rn_lexobetter(netmask, tt->rn_mask))))
				break;
		}
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 *
		 * We also reverse, or doubly link the list through the
		 * parent pointer.
		 */
		if (tt == saved_tt) {
			struct radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_parent = x = t->rn_parent;
			t->rn_parent = tt; /* parent */
			if (x->rn_left == t)
				x->rn_left = tt;
			else
				x->rn_right = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
			tt->rn_parent = t; /* parent */
			if (tt->rn_dupedkey) /* parent */
				tt->rn_dupedkey->rn_parent = tt; /* parent */
		}
		tt->rn_key = (caddr_t) v;
		tt->rn_bit = -1;
		tt->rn_flags = RNF_ACTIVE;
	}

	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_bit = x->rn_bit;
		tt->rn_flags |= x->rn_flags & RNF_NORMAL;
	}
	t = saved_tt->rn_parent;
	if (keyduplicated)
		goto on2;
	b_leaf = -1 - t->rn_bit;
	if (t->rn_right == saved_tt)
		x = t->rn_left;
	else
		x = t->rn_right;

	/* Promote general routes from below */
	if (x->rn_bit < 0) {
		for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
			if (x->rn_mask && (x->rn_bit >= b_leaf) && x->rn_mklist == 0) {
				*mp = m = rn_new_radix_mask(x, 0);
				if (m)
					mp = &m->rm_mklist;
			}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
			if (m->rm_bit >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}

on2:
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_bit))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_bit;
	do {
		x = t;
		t = t->rn_parent;
	} while (b <= t->rn_bit && x != top);

	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
		if (m->rm_bit < b_leaf)
			continue;
		if (m->rm_bit > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			mmask = m->rm_leaf->rn_mask;
			if (tt->rn_flags & RNF_NORMAL) {
				dprintf("Non-unique normal route, mask not entered\n");
				return tt;
			}
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, mmask)
			|| rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
	return tt;
}
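/*
 * Worked example (for illustration): if routes 10.1.0.0/16 and
 * 10.1.0.0/24 are both added, their leaves share one key and are chained
 * through rn_dupedkey, sorted most specific first (/24 before /16),
 * mirroring the ordering used for the rm_mklist mask lists above.
 */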
struct radix_node *
rn_delete(void *v_arg, void *netmask_arg, struct radix_node_head *head)
{
	register struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	caddr_t v, netmask;
	int b, head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_offset;
	vlen = LEN(v);
	saved_tt = tt;
	top = x;
	if (tt == 0
		|| memcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return NULL;

	/*
	 * Delete our route from mask lists.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 1, head_off)) == 0)
			return NULL;
		netmask = x->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return NULL;
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt || m->rm_refs > 0) {
			dprintf("rn_delete: inconsistent annotation\n");
			return 0; /* dangling ref could cause disaster */
		}
	} else {
		if (m->rm_mask != tt->rn_mask) {
			dprintf("rn_delete: inconsistent annotation\n");
			goto on1;
		}
		if (--m->rm_refs >= 0)
			goto on1;
	}
	b = -1 - tt->rn_bit;
	t = saved_tt->rn_parent;
	if (b > t->rn_bit)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_parent;
	} while (b <= t->rn_bit && x != top);
	for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0) {
		dprintf("rn_delete: couldn't find our annotation\n");
		if (tt->rn_flags & RNF_NORMAL)
			return 0; /* Dangling ref to us */
	}

on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return NULL;
	t = tt->rn_parent;
	dupedkey = saved_tt->rn_dupedkey;
	if (dupedkey) {
		/*
		 * Here, tt is the deletion target and
		 * saved_tt is the head of the dupekey chain.
		 */
		if (tt == saved_tt) {
			/* remove from head of chain */
			x = dupedkey; x->rn_parent = t;
			if (t->rn_left == tt)
				t->rn_left = x;
			else
				t->rn_right = x;
		} else {
			/* find node in front of tt on the chain */
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) {
				p->rn_dupedkey = tt->rn_dupedkey;
				if (tt->rn_dupedkey) /* parent */
					tt->rn_dupedkey->rn_parent = p;
			} else
				dprintf("rn_delete: couldn't find us\n");
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
			*++x = *t;
			p = t->rn_parent;
			if (p->rn_left == t)
				p->rn_left = x;
			else
				p->rn_right = x;
			x->rn_left->rn_parent = x;
			x->rn_right->rn_parent = x;
		}
		goto out;
	}
	if (t->rn_left == tt)
		x = t->rn_right;
	else
		x = t->rn_left;
	p = t->rn_parent;
	if (p->rn_right == t)
		p->rn_right = x;
	else
		p->rn_left = x;
	x->rn_parent = p;

	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_bit >= 0) {
			for (mp = &x->rn_mklist; (m = *mp);)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/* If there are any key,mask pairs in a sibling
			   duped-key chain, some subset will appear sorted
			   in the same order attached to our mklist */
			for (m = t->rn_mklist; m && x; x = x->rn_dupedkey) {
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_mklist;
					x->rn_mklist = 0;
					if (--(m->rm_refs) < 0)
						MKFree(m);
					m = mm;
				}
			}
			if (m)
				dprintf("rn_delete: Orphaned Mask %p at %p\n",
					(void *)m, (void *)x);
		}
	}

	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
		*t = *x;
		t->rn_left->rn_parent = t;
		t->rn_right->rn_parent = t;
		p = x->rn_parent;
		if (p->rn_left == x)
			p->rn_left = t;
		else
			p->rn_right = t;
	}

out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return tt;
}
int
rn_refines(void *m_arg, void *n_arg)
{
	register caddr_t m = m_arg, n = n_arg;
	register caddr_t lim, lim2 = lim = n + LEN(n);
	int longer = LEN(n++) - (int)LEN(m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2) {
		if (*n++)
			return 0;
	}
	if (masks_are_equal && (longer < 0)) {
		for (lim2 = m - longer; m < lim2; ) {
			if (*m++)
				return 1;
		}
	}
	return !masks_are_equal;
}
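/*
 * For illustration: rn_refines(m, n) answers "is m a strictly more
 * specific mask than n?". With m = 255.255.255.0 and n = 255.255.0.0 it
 * returns 1; with the arguments swapped, or with two equal masks, it
 * returns 0.
 */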
/*
	Allocate and initialize an empty tree. This has 3 nodes, which are
	part of the radix_node_head (in the order <left,root,right>) and are
	marked RNF_ROOT so they cannot be freed.
	The leaves have all-zero and all-one keys, with significant
	bits starting at 'off'.
	Return 1 on success, 0 on error.
*/
int
rn_inithead(void **head, int off)
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;

	rnh = (struct radix_node_head *)calloc(1, sizeof(*rnh));
	if (rnh == NULL)
		return 0;
	*head = rnh;

	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_right = ttt;
	t->rn_parent = t;
	tt = t->rn_left; /* ... which in turn is rnh->rnh_nodes */
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_bit = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;

	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_lookup = rn_lookup;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_walktree_from = rn_walktree_from;
	rnh->rnh_treetop = t;
	return 1;
}
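/*
 * For illustration, the empty tree built by rn_inithead() above looks
 * like this (indices into rnh->rnh_nodes):
 *
 *	          [1] root, tests bit 'off'
 *	         /                        \
 *	   [0] leaf,                  [2] leaf,
 *	   all-zero key               all-one key
 */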
	for (dom = domains; dom; dom = dom->dom_next)
		if (dom->dom_maxrtkey > max_keylen)
			max_keylen = dom->dom_maxrtkey;

	if (max_keylen == 0) {
		dprintf("rn_init: radix functions require max_keylen be set\n");
		return B_ERROR;
	}

	rn_zeros = (char *)malloc(3 * max_keylen);
	if (rn_zeros == NULL)
		return B_NO_MEMORY;

	memset(rn_zeros, 0, 3 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	addmask_key = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = 0xff;

	if (rn_inithead((void **)(void *)&mask_rnhead, 0) == 0)
, 0) == 0)