1 /* $NetBSD: radix.c,v 1.4 2006/04/04 16:17:18 martti Exp $ */
4 * Copyright (c) 1988, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * @(#)radix.c 8.6 (Berkeley) 10/17/95
32 * Routines to build and maintain radix trees for routing lookups.
34 #if defined(KERNEL) || defined(_KERNEL)
40 #define __SYS_ATOMIC_OPS_H__
41 #if !defined(__svr4__) && !defined(__SVR4) && !defined(__osf__) && \
42 !defined(__hpux) && !defined(__sgi)
43 #include <sys/cdefs.h>
54 # define _IPV6_SWTAB_H
55 # define _PROTO_NET_H_
56 # define _PROTO_IPV6_H
57 # include <sys/malloc.h>
60 #include <sys/param.h>
62 #include <sys/systm.h>
64 void panic
__P((char *str
));
73 #include <sys/syslog.h>
76 #include <netinet/in.h>
77 #include <sys/socket.h>
82 #include "netinet/ip_compat.h"
83 #include "netinet/ip_fil.h"
88 #include "radix_ipf.h"
97 static struct radix_mask
*rn_mkfreelist
;
98 static struct radix_node_head
*mask_rnhead
;
99 static char *addmask_key
;
100 static u_char normal_chars
[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff};
101 static char *rn_zeros
= NULL
, *rn_ones
= NULL
;
103 #define rn_masktop (mask_rnhead->rnh_treetop)
105 #define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
107 static int rn_satisfies_leaf
__P((char *, struct radix_node
*, int));
108 static int rn_lexobetter
__P((void *, void *));
109 static struct radix_mask
*rn_new_radix_mask
__P((struct radix_node
*,
110 struct radix_mask
*));
111 static int rn_freenode
__P((struct radix_node
*, void *));
112 #if defined(AIX) && !defined(_KERNEL)
113 struct radix_node
*rn_match
__P((void *, struct radix_node_head
*));
114 struct radix_node
*rn_addmask
__P((int, int, void *));
115 #define FreeS(x, y) KFREES(x, y)
116 #define Bcopy(x, y, z) bcopy(x, y, z)
/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask to be the key.
 * We define the index of a route to associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit is 0, past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If the index(m) < rn_b, this implies the trailing last few bits of k
 * before bit b are all 0, (and hence consequently true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */
154 rn_search(v_arg
, head
)
156 struct radix_node
*head
;
158 struct radix_node
*x
;
161 for (x
= head
, v
= v_arg
; x
->rn_b
>= 0;) {
162 if (x
->rn_bmask
& v
[x
->rn_off
])
171 rn_search_m(v_arg
, head
, m_arg
)
172 struct radix_node
*head
;
175 struct radix_node
*x
;
176 caddr_t v
= v_arg
, m
= m_arg
;
178 for (x
= head
; x
->rn_b
>= 0;) {
179 if ((x
->rn_bmask
& m
[x
->rn_off
]) &&
180 (x
->rn_bmask
& v
[x
->rn_off
]))
189 rn_refines(m_arg
, n_arg
)
192 caddr_t m
= m_arg
, n
= n_arg
;
193 caddr_t lim
, lim2
= lim
= n
+ *(u_char
*)n
;
194 int longer
= (*(u_char
*)n
++) - (int)(*(u_char
*)m
++);
195 int masks_are_equal
= 1;
208 if (masks_are_equal
&& (longer
< 0))
209 for (lim2
= m
- longer
; m
< lim2
; )
212 return (!masks_are_equal
);
216 rn_lookup(v_arg
, m_arg
, head
)
218 struct radix_node_head
*head
;
220 struct radix_node
*x
;
224 if ((x
= rn_addmask(m_arg
, 1, head
->rnh_treetop
->rn_off
)) == 0)
228 x
= rn_match(v_arg
, head
);
230 while (x
&& x
->rn_mask
!= netmask
)
237 rn_satisfies_leaf(trial
, leaf
, skip
)
239 struct radix_node
*leaf
;
242 char *cp
= trial
, *cp2
= leaf
->rn_key
, *cp3
= leaf
->rn_mask
;
244 int length
= min(*(u_char
*)cp
, *(u_char
*)cp2
);
249 length
= min(length
, *(u_char
*)cp3
);
253 for (cp
+= skip
; cp
< cplim
; cp
++, cp2
++, cp3
++)
254 if ((*cp
^ *cp2
) & *cp3
)
260 rn_match(v_arg
, head
)
262 struct radix_node_head
*head
;
265 struct radix_node
*t
= head
->rnh_treetop
, *x
;
268 struct radix_node
*saved_t
, *top
= t
;
269 int off
= t
->rn_off
, vlen
= *(u_char
*)cp
, matched_off
;
273 * Open code rn_search(v, top) to avoid overhead of extra
276 for (; t
->rn_b
>= 0; ) {
277 if (t
->rn_bmask
& cp
[t
->rn_off
])
283 * See if we match exactly as a host destination
284 * or at least learn how many bits match, for normal mask finesse.
286 * It doesn't hurt us to limit how many bytes to check
287 * to the length of the mask, since if it matches we had a genuine
288 * match and the leaf we have is the most specific one anyway;
289 * if it didn't match with a shorter length it would fail
290 * with a long one. This wins big for class B&C netmasks which
291 * are probably the most common case...
294 vlen
= *(u_char
*)t
->rn_mask
;
296 cp2
= t
->rn_key
+ off
;
298 for (; cp
< cplim
; cp
++, cp2
++)
302 * This extra grot is in case we are explicitly asked
303 * to look up the default. Ugh!
305 if ((t
->rn_flags
& RNF_ROOT
) && t
->rn_dupedkey
)
309 test
= (*cp
^ *cp2
) & 0xff; /* find first bit that differs */
310 for (b
= 7; (test
>>= 1) > 0;)
312 matched_off
= cp
- v
;
313 b
+= matched_off
<< 3;
316 * If there is a host route in a duped-key chain, it will be first.
318 if ((saved_t
= t
)->rn_mask
== 0)
320 for (; t
; t
= t
->rn_dupedkey
)
322 * Even if we don't match exactly as a host,
323 * we may match if the leaf we wound up at is
326 if (t
->rn_flags
& RNF_NORMAL
) {
329 } else if (rn_satisfies_leaf(v
, t
, matched_off
))
332 /* start searching up the tree */
334 struct radix_mask
*m
;
339 * If non-contiguous masks ever become important
340 * we can restore the masking and open coding of
341 * the search and satisfaction test and put the
342 * calculation of "off" back before the "do".
345 if (m
->rm_flags
& RNF_NORMAL
) {
349 off
= min(t
->rn_off
, matched_off
);
350 x
= rn_search_m(v
, t
, m
->rm_mask
);
351 while (x
&& x
->rn_mask
!= m
->rm_mask
)
353 if (x
&& rn_satisfies_leaf(v
, x
, off
))
#ifdef	RDX_DEBUG
/* Debug bookkeeping: node serial numbers and a list of created leaves. */
int	rn_nodenum;
struct	radix_node *rn_clist;
int	rn_saveinfo;
int	rn_debug = 1;
#endif
371 rn_newpair(v
, b
, nodes
)
374 struct radix_node nodes
[2];
376 struct radix_node
*tt
= nodes
, *t
= tt
+ 1;
378 t
->rn_bmask
= 0x80 >> (b
& 7);
382 tt
->rn_key
= (caddr_t
)v
;
384 tt
->rn_flags
= t
->rn_flags
= RNF_ACTIVE
;
386 tt
->rn_info
= rn_nodenum
++;
387 t
->rn_info
= rn_nodenum
++;
389 tt
->rn_ybro
= rn_clist
;
396 rn_insert(v_arg
, head
, dupentry
, nodes
)
398 struct radix_node_head
*head
;
400 struct radix_node nodes
[2];
403 struct radix_node
*top
= head
->rnh_treetop
;
404 int head_off
= top
->rn_off
, vlen
= (int)*((u_char
*)v
);
405 struct radix_node
*t
= rn_search(v_arg
, top
);
406 caddr_t cp
= v
+ head_off
;
408 struct radix_node
*tt
;
412 log(LOG_DEBUG
, "rn_insert(%p,%p,%p,%p)\n", v_arg
, head
, dupentry
, nodes
);
415 * Find first bit at which v and t->rn_key differ
418 caddr_t cp2
= t
->rn_key
+ head_off
;
420 caddr_t cplim
= v
+ vlen
;
429 cmp_res
= (cp
[-1] ^ cp2
[-1]) & 0xff;
430 for (b
= (cp
- v
) << 3; cmp_res
; b
--)
434 struct radix_node
*p
, *x
= top
;
438 if (cp
[x
->rn_off
] & x
->rn_bmask
)
442 } while (b
> (unsigned) x
->rn_b
); /* x->rn_b < b && x->rn_b >= 0 */
445 log(LOG_DEBUG
, "rn_insert: Going In:\n"); // traverse(p);
447 t
= rn_newpair(v_arg
, b
, nodes
);
449 if ((cp
[p
->rn_off
] & p
->rn_bmask
) == 0)
454 t
->rn_p
= p
; /* frees x, p as temp vars below */
455 if ((cp
[t
->rn_off
] & t
->rn_bmask
) == 0) {
463 log(LOG_DEBUG
, "rn_insert: Coming Out:\n"); // traverse(p);
470 rn_addmask(n_arg
, search
, skip
)
474 caddr_t netmask
= (caddr_t
)n_arg
;
475 struct radix_node
*x
;
478 int maskduplicated
, m0
, isnormal
;
479 struct radix_node
*saved_x
;
480 static int last_zeroed
= 0;
484 log(LOG_DEBUG
, "rn_addmask(%p,%d,%d)\n", n_arg
, search
, skip
);
486 mlen
= *(u_char
*)netmask
;
487 if ((mlen
= *(u_char
*)netmask
) > max_keylen
)
492 return (mask_rnhead
->rnh_nodes
);
494 Bcopy(rn_ones
+ 1, addmask_key
+ 1, skip
- 1);
495 if ((m0
= mlen
) > skip
)
496 Bcopy(netmask
+ skip
, addmask_key
+ skip
, mlen
- skip
);
498 * Trim trailing zeroes.
500 for (cp
= addmask_key
+ mlen
; (cp
> addmask_key
) && cp
[-1] == 0;)
502 mlen
= cp
- addmask_key
;
504 if (m0
>= last_zeroed
)
506 return (mask_rnhead
->rnh_nodes
);
508 if (m0
< last_zeroed
)
509 Bzero(addmask_key
+ m0
, last_zeroed
- m0
);
510 *addmask_key
= last_zeroed
= mlen
;
511 x
= rn_search(addmask_key
, rn_masktop
);
512 if (Bcmp(addmask_key
, x
->rn_key
, mlen
) != 0)
516 R_Malloc(x
, struct radix_node
*, max_keylen
+ 2 * sizeof (*x
));
517 if ((saved_x
= x
) == 0)
519 Bzero(x
, max_keylen
+ 2 * sizeof (*x
));
520 netmask
= cp
= (caddr_t
)(x
+ 2);
521 Bcopy(addmask_key
, cp
, mlen
);
522 x
= rn_insert(cp
, mask_rnhead
, &maskduplicated
, x
);
523 if (maskduplicated
) {
525 log(LOG_ERR
, "rn_addmask: mask impossibly already in tree\n");
531 * Calculate index of mask, and check for normalcy.
533 cplim
= netmask
+ mlen
;
535 for (cp
= netmask
+ skip
; (cp
< cplim
) && *(u_char
*)cp
== 0xff;)
538 for (j
= 0x80; (j
& *cp
) != 0; j
>>= 1)
540 if (*cp
!= normal_chars
[b
] || cp
!= (cplim
- 1))
543 b
+= (cp
- netmask
) << 3;
546 x
->rn_flags
|= RNF_NORMAL
;
550 static int /* XXX: arbitrary ordering for non-contiguous masks */
551 rn_lexobetter(m_arg
, n_arg
)
554 u_char
*mp
= m_arg
, *np
= n_arg
, *lim
;
557 return 1; /* not really, but need to check longer one first */
559 for (lim
= mp
+ *mp
; mp
< lim
;)
565 static struct radix_mask
*
566 rn_new_radix_mask(tt
, next
)
567 struct radix_node
*tt
;
568 struct radix_mask
*next
;
570 struct radix_mask
*m
;
575 log(LOG_ERR
, "Mask for route not entered\n");
581 m
->rm_flags
= tt
->rn_flags
;
582 if (tt
->rn_flags
& RNF_NORMAL
)
585 m
->rm_mask
= tt
->rn_mask
;
592 rn_addroute(v_arg
, n_arg
, head
, treenodes
)
594 struct radix_node_head
*head
;
595 struct radix_node treenodes
[2];
597 caddr_t v
= (caddr_t
)v_arg
, netmask
= (caddr_t
)n_arg
;
598 struct radix_node
*t
, *x
= NULL
, *tt
;
599 struct radix_node
*saved_tt
, *top
= head
->rnh_treetop
;
600 short b
= 0, b_leaf
= 0;
603 struct radix_mask
*m
, **mp
;
607 log(LOG_DEBUG
, "rn_addroute(%p,%p,%p,%p)\n", v_arg
, n_arg
, head
, treenodes
);
610 * In dealing with non-contiguous masks, there may be
611 * many different routes which have the same mask.
612 * We will find it useful to have a unique pointer to
613 * the mask to speed avoiding duplicate references at
614 * nodes and possibly save time in calculating indices.
617 if ((x
= rn_addmask(netmask
, 0, top
->rn_off
)) == 0)
624 * Deal with duplicated keys: attach node to previous instance
626 saved_tt
= tt
= rn_insert(v
, head
, &keyduplicated
, treenodes
);
628 for (t
= tt
; tt
; t
= tt
, tt
= tt
->rn_dupedkey
) {
629 if (tt
->rn_mask
== netmask
)
633 ((b_leaf
< tt
->rn_b
) || /* index(netmask) > node */
634 rn_refines(netmask
, tt
->rn_mask
) ||
635 rn_lexobetter(netmask
, tt
->rn_mask
))))
639 * If the mask is not duplicated, we wouldn't
640 * find it among possible duplicate key entries
641 * anyway, so the above test doesn't hurt.
643 * We sort the masks for a duplicated key the same way as
644 * in a masklist -- most specific to least specific.
645 * This may require the unfortunate nuisance of relocating
646 * the head of the list.
648 * We also reverse, or doubly link the list through the
651 if (tt
== saved_tt
) {
652 struct radix_node
*xx
= x
;
653 /* link in at head of list */
654 (tt
= treenodes
)->rn_dupedkey
= t
;
655 tt
->rn_flags
= t
->rn_flags
;
656 tt
->rn_p
= x
= t
->rn_p
;
665 (tt
= treenodes
)->rn_dupedkey
= t
->rn_dupedkey
;
669 tt
->rn_dupedkey
->rn_p
= tt
;
673 tt
->rn_info
= rn_nodenum
++;
674 t
->rn_info
= rn_nodenum
++;
676 tt
->rn_ybro
= rn_clist
;
679 tt
->rn_key
= (caddr_t
) v
;
681 tt
->rn_flags
= RNF_ACTIVE
;
687 tt
->rn_mask
= netmask
;
689 tt
->rn_flags
|= x
->rn_flags
& RNF_NORMAL
;
694 b_leaf
= -1 - t
->rn_b
;
695 if (t
->rn_r
== saved_tt
)
699 /* Promote general routes from below */
701 for (mp
= &t
->rn_mklist
; x
; x
= x
->rn_dupedkey
)
702 if (x
->rn_mask
&& (x
->rn_b
>= b_leaf
) && x
->rn_mklist
== 0) {
703 *mp
= m
= rn_new_radix_mask(x
, 0);
707 } else if (x
->rn_mklist
) {
709 * Skip over masks whose index is > that of new node
711 for (mp
= &x
->rn_mklist
; (m
= *mp
) != NULL
; mp
= &m
->rm_mklist
)
712 if (m
->rm_b
>= b_leaf
)
718 /* Add new route to highest possible ancestor's list */
719 if ((netmask
== 0) || (b
> t
->rn_b
))
720 return tt
; /* can't lift at all */
725 } while (b
<= t
->rn_b
&& x
!= top
);
727 * Search through routes associated with node to
728 * insert new route according to index.
729 * Need same criteria as when sorting dupedkeys to avoid
730 * double loop on deletion.
732 for (mp
= &x
->rn_mklist
; (m
= *mp
) != NULL
; mp
= &m
->rm_mklist
) {
733 if (m
->rm_b
< b_leaf
)
735 if (m
->rm_b
> b_leaf
)
737 if (m
->rm_flags
& RNF_NORMAL
) {
738 mmask
= m
->rm_leaf
->rn_mask
;
739 if (tt
->rn_flags
& RNF_NORMAL
) {
741 log(LOG_ERR
, "Non-unique normal route,"
742 " mask not entered\n");
748 if (mmask
== netmask
) {
753 if (rn_refines(netmask
, mmask
)
754 || rn_lexobetter(netmask
, mmask
))
757 *mp
= rn_new_radix_mask(tt
, *mp
);
762 rn_delete(v_arg
, netmask_arg
, head
)
763 void *v_arg
, *netmask_arg
;
764 struct radix_node_head
*head
;
766 struct radix_node
*t
, *p
, *x
, *tt
;
767 struct radix_mask
*m
, *saved_m
, **mp
;
768 struct radix_node
*dupedkey
, *saved_tt
, *top
;
770 int b
, head_off
, vlen
;
773 netmask
= netmask_arg
;
774 x
= head
->rnh_treetop
;
775 tt
= rn_search(v
, x
);
776 head_off
= x
->rn_off
;
781 Bcmp(v
+ head_off
, tt
->rn_key
+ head_off
, vlen
- head_off
))
784 * Delete our route from mask lists.
787 if ((x
= rn_addmask(netmask
, 1, head_off
)) == 0)
790 while (tt
->rn_mask
!= netmask
)
791 if ((tt
= tt
->rn_dupedkey
) == 0)
794 if (tt
->rn_mask
== 0 || (saved_m
= m
= tt
->rn_mklist
) == 0)
796 if (tt
->rn_flags
& RNF_NORMAL
) {
797 if (m
->rm_leaf
!= tt
|| m
->rm_refs
> 0) {
799 log(LOG_ERR
, "rn_delete: inconsistent annotation\n");
801 return 0; /* dangling ref could cause disaster */
804 if (m
->rm_mask
!= tt
->rn_mask
) {
806 log(LOG_ERR
, "rn_delete: inconsistent annotation\n");
810 if (--m
->rm_refs
>= 0)
816 goto on1
; /* Wasn't lifted at all */
820 } while (b
<= t
->rn_b
&& x
!= top
);
821 for (mp
= &x
->rn_mklist
; (m
= *mp
) != NULL
; mp
= &m
->rm_mklist
)
829 log(LOG_ERR
, "rn_delete: couldn't find our annotation\n");
831 if (tt
->rn_flags
& RNF_NORMAL
)
832 return (0); /* Dangling ref to us */
836 * Eliminate us from tree
838 if (tt
->rn_flags
& RNF_ROOT
)
841 /* Get us out of the creation list */
842 for (t
= rn_clist
; t
&& t
->rn_ybro
!= tt
; t
= t
->rn_ybro
)
844 if (t
) t
->rn_ybro
= tt
->rn_ybro
;
847 dupedkey
= saved_tt
->rn_dupedkey
;
850 * Here, tt is the deletion target and
851 * saved_tt is the head of the dupedkey chain.
853 if (tt
== saved_tt
) {
861 /* find node in front of tt on the chain */
862 for (x
= p
= saved_tt
; p
&& p
->rn_dupedkey
!= tt
;)
865 p
->rn_dupedkey
= tt
->rn_dupedkey
;
867 tt
->rn_dupedkey
->rn_p
= p
;
871 log(LOG_ERR
, "rn_delete: couldn't find us\n");
875 if (t
->rn_flags
& RNF_ACTIVE
) {
905 * Demote routes attached to us.
909 for (mp
= &x
->rn_mklist
; (m
= *mp
) != NULL
;)
913 /* If there are any key,mask pairs in a sibling
914 duped-key chain, some subset will appear sorted
915 in the same order attached to our mklist */
916 for (m
= t
->rn_mklist
; m
&& x
; x
= x
->rn_dupedkey
)
917 if (m
== x
->rn_mklist
) {
918 struct radix_mask
*mm
= m
->rm_mklist
;
920 if (--(m
->rm_refs
) < 0)
926 log(LOG_ERR
, "%s %p at %p\n",
927 "rn_delete: Orphaned Mask", m
, x
);
932 * We may be holding an active internal node in the tree.
952 tt
->rn_flags
&= ~RNF_ACTIVE
;
953 tt
[1].rn_flags
&= ~RNF_ACTIVE
;
959 struct radix_node_head
*h
;
960 int (*f
) __P((struct radix_node
*, void *));
964 struct radix_node
*base
, *next
;
965 struct radix_node
*rn
= h
->rnh_treetop
;
967 * This gets complicated because we may delete the node
968 * while applying the function f to it, so we need to calculate
969 * the successor node in advance.
971 /* First time through node, go left */
972 while (rn
->rn_b
>= 0)
976 /* If at right child go back up, otherwise, go right */
977 while (rn
->rn_p
->rn_r
== rn
&& (rn
->rn_flags
& RNF_ROOT
) == 0)
979 /* Find the next *leaf* since next node might vanish, too */
980 for (rn
= rn
->rn_p
->rn_r
; rn
->rn_b
>= 0;)
984 while ((rn
= base
) != NULL
) {
985 base
= rn
->rn_dupedkey
;
986 if (!(rn
->rn_flags
& RNF_ROOT
)
987 && (error
= (*f
)(rn
, w
)))
991 if (rn
->rn_flags
& RNF_ROOT
)
998 rn_inithead(head
, off
)
1002 struct radix_node_head
*rnh
;
1006 R_Malloc(rnh
, struct radix_node_head
*, sizeof (*rnh
));
1010 return rn_inithead0(rnh
, off
);
1014 rn_inithead0(rnh
, off
)
1015 struct radix_node_head
*rnh
;
1018 struct radix_node
*t
, *tt
, *ttt
;
1020 Bzero(rnh
, sizeof (*rnh
));
1021 t
= rn_newpair(rn_zeros
, off
, rnh
->rnh_nodes
);
1022 ttt
= rnh
->rnh_nodes
+ 2;
1026 tt
->rn_flags
= t
->rn_flags
= RNF_ROOT
| RNF_ACTIVE
;
1027 tt
->rn_b
= -1 - off
;
1029 ttt
->rn_key
= rn_ones
;
1030 rnh
->rnh_addaddr
= rn_addroute
;
1031 rnh
->rnh_deladdr
= rn_delete
;
1032 rnh
->rnh_matchaddr
= rn_match
;
1033 rnh
->rnh_lookup
= rn_lookup
;
1034 rnh
->rnh_walktree
= rn_walktree
;
1035 rnh
->rnh_treetop
= t
;
1044 if (max_keylen
== 0) {
1047 "rn_init: radix functions require max_keylen be set\n");
1051 if (rn_zeros
== NULL
) {
1052 R_Malloc(rn_zeros
, char *, 3 * max_keylen
);
1054 if (rn_zeros
== NULL
)
1056 Bzero(rn_zeros
, 3 * max_keylen
);
1057 rn_ones
= cp
= rn_zeros
+ max_keylen
;
1058 addmask_key
= cplim
= rn_ones
+ max_keylen
;
1061 if (rn_inithead((void *)&mask_rnhead
, 0) == 0)
1067 rn_freenode(struct radix_node
*n
, void *p
)
1069 struct radix_node_head
*rnh
= p
;
1070 struct radix_node
*d
;
1072 d
= rnh
->rnh_deladdr(n
->rn_key
, NULL
, rnh
);
1074 FreeS(d
, max_keylen
+ 2 * sizeof (*d
));
1082 struct radix_node_head
*rnh
;
1085 (void)rn_walktree(rnh
, rn_freenode
, rnh
);
1087 rnh
->rnh_addaddr
= NULL
;
1088 rnh
->rnh_deladdr
= NULL
;
1089 rnh
->rnh_matchaddr
= NULL
;
1090 rnh
->rnh_lookup
= NULL
;
1091 rnh
->rnh_walktree
= NULL
;
1100 struct radix_mask
*m
;
1102 if (rn_zeros
!= NULL
) {
1103 FreeS(rn_zeros
, 3 * max_keylen
);
1107 if (mask_rnhead
!= NULL
) {
1108 rn_freehead(mask_rnhead
);
1112 while ((m
= rn_mkfreelist
) != NULL
) {
1113 rn_mkfreelist
= m
->rm_mklist
;
1121 typedef struct myst
{
1124 struct radix_node nodes
[2];
1128 main(int argc
, char *argv
[])
1130 struct radix_node_head
*rnh
;
1131 struct radix_node
*rn
;
1132 addrfamily_t af
, mf
;
1133 myst_t st1
, st2
, *stp
;
1135 memset(&st1
, 0, sizeof(st1
));
1136 memset(&st2
, 0, sizeof(st2
));
1137 memset(&af
, 0, sizeof(af
));
1142 rn_inithead(&rnh
, offsetof(addrfamily_t
, adf_addr
) << 3);
1144 st1
.dst
.adf_len
= sizeof(st1
);
1145 st1
.mask
.adf_len
= sizeof(st1
);
1146 st1
.dst
.adf_addr
.in4
.s_addr
= inet_addr("127.0.0.0");
1147 st1
.mask
.adf_addr
.in4
.s_addr
= inet_addr("255.0.0.0");
1148 rn
= rnh
->rnh_addaddr(&st1
.dst
, &st1
.mask
, rnh
, st1
.nodes
);
1149 printf("add.1 %p\n", rn
);
1151 st2
.dst
.adf_len
= sizeof(st2
);
1152 st2
.mask
.adf_len
= sizeof(st2
);
1153 st2
.dst
.adf_addr
.in4
.s_addr
= inet_addr("127.0.1.0");
1154 st2
.mask
.adf_addr
.in4
.s_addr
= inet_addr("255.255.255.0");
1155 rn
= rnh
->rnh_addaddr(&st2
.dst
, &st2
.mask
, rnh
, st2
.nodes
);
1156 printf("add.2 %p\n", rn
);
1158 af
.adf_len
= sizeof(af
);
1159 af
.adf_addr
.in4
.s_addr
= inet_addr("127.0.1.0");
1160 rn
= rnh
->rnh_matchaddr(&af
, rnh
);
1162 printf("1.lookup = %p key %p mask %p\n", rn
, rn
->rn_key
, rn
->rn_mask
);
1164 printf("%s/", inet_ntoa(stp
->dst
.adf_addr
.in4
));
1166 printf("%s\n", inet_ntoa(stp
->dst
.adf_addr
.in4
));
1169 mf
.adf_len
= sizeof(mf
);
1170 mf
.adf_addr
.in4
.s_addr
= inet_addr("255.255.255.0");
1171 rn
= rnh
->rnh_lookup(&af
, &mf
, rnh
);
1173 printf("2.lookup = %p key %p mask %p\n", rn
, rn
->rn_key
, rn
->rn_mask
);
1175 printf("%s/", inet_ntoa(stp
->dst
.adf_addr
.in4
));
1177 printf("%s\n", inet_ntoa(stp
->dst
.adf_addr
.in4
));
1180 af
.adf_len
= sizeof(af
);
1181 af
.adf_addr
.in4
.s_addr
= inet_addr("126.0.0.1");
1182 rn
= rnh
->rnh_matchaddr(&af
, rnh
);
1184 printf("3.lookup = %p key %p mask %p\n", rn
, rn
->rn_key
, rn
->rn_mask
);
1186 printf("%s/", inet_ntoa(stp
->dst
.adf_addr
.in4
));
1188 printf("%s\n", inet_ntoa(stp
->dst
.adf_addr
.in4
));
1196 log(int level
, char *format
, ...)
1200 va_start(ap
, format
);
1201 vfprintf(stderr
, format
, ap
);