/*
 * Copyright 2001-2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	8.4 (Berkeley) 11/2/94
 *
 * $FreeBSD: src/sbin/routed/radix.c,v 1.6 2000/08/11 08:24:38 sheldonh Exp $
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

/*
 * Routines to build and maintain radix trees for routing lookups.
 */

#include "defs.h"
static const size_t max_keylen = sizeof (struct sockaddr_in);
static struct radix_mask *rn_mkfreelist;
static struct radix_node_head *mask_rnhead;
static uint8_t *rn_zeros, *rn_ones, *addmask_key;

#define	rn_masktop (mask_rnhead->rnh_treetop)

static boolean_t rn_satisfies_leaf(uint8_t *, struct radix_node *, int);

static boolean_t rn_refines(void *, void *);

static struct radix_node
	*rn_addmask(void *, uint_t, uint_t),
	*rn_addroute(void *, void *, struct radix_node_head *,
	    struct radix_node [2]),
	*rn_delete(void *, void *, struct radix_node_head *),
	*rn_insert(void *, struct radix_node_head *, boolean_t *,
	    struct radix_node [2]),
	*rn_match(void *, struct radix_node_head *),
	*rn_newpair(void *, uint_t, struct radix_node[2]),
	*rn_search(void *, struct radix_node *),
	*rn_search_m(void *, struct radix_node *, void *);

static struct radix_node *rn_lookup(void *, void *, struct radix_node_head *);

#ifdef DEBUG
#define	DBGMSG(x)	msglog x
#else
#define	DBGMSG(x)	(void) 0
#endif

/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask be the key.
 * We define the index of the route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit is 0, past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the trailing bits of k before
 * bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask and compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */
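
/*
 * For example, under the numbering above a contiguous mask whose bits are
 * all ones up through bit 23 and zero from bit 24 onward has index 24.
 * rn_addmask() records the index of a mask in its leaf as rn_b == -1 - index,
 * and rn_match() compares these encoded values directly when deciding
 * whether a normal route covers a given key.
 */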

static struct radix_node *
rn_search(void *v_arg, struct radix_node *head)
{
	struct radix_node *x;
	uint8_t *v;

	for (x = head, v = v_arg; x->rn_b >= 0; ) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}

static struct radix_node *
rn_search_m(void *v_arg, struct radix_node *head, void *m_arg)
{
	struct radix_node *x;
	uint8_t *v = v_arg, *m = m_arg;

	for (x = head; x->rn_b >= 0; ) {
		if (x->rn_bmask & m[x->rn_off] & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}

/*
 * Returns true if there are no bits set in n_arg that are zero in
 * m_arg and the masks aren't equal.  In other words, it returns true
 * when m_arg is a finer-granularity netmask -- it represents a subset
 * of the destinations implied by n_arg.
 */
static boolean_t
rn_refines(void *m_arg, void *n_arg)
{
	uint8_t *m = m_arg, *n = n_arg;
	uint8_t *lim;
	boolean_t masks_are_equal = _B_TRUE;

	lim = n + sizeof (struct sockaddr);

	while (n < lim) {
		if (*n & ~(*m))
			return (_B_FALSE);
		if (*n++ != *m++)
			masks_are_equal = _B_FALSE;
	}
	return (!masks_are_equal);
}

static struct radix_node *
rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head)
{
	struct radix_node *x;
	uint8_t *netmask = NULL;

	if (m_arg) {
		if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) ==
		    NULL) {
			DBGMSG(("rn_lookup: failed to add mask"));
			return (NULL);
		}
		netmask = x->rn_key;
	}
	x = rn_match(v_arg, head);
	if (x && netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	return (x);
}

/*
 * Returns true if address 'trial' has no bits differing from the
 * leaf's key when compared under the leaf's mask.  In other words,
 * returns true when 'trial' matches leaf.
 */
static boolean_t
rn_satisfies_leaf(uint8_t *trial,
    struct radix_node *leaf,
    int skip)
{
	uint8_t *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
	uint8_t *cplim;
	size_t length;

	length = sizeof (struct sockaddr);

	if (cp3 == NULL)
		cp3 = rn_ones;
	cplim = cp + length;
	cp3 += skip;
	cp2 += skip;
	for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
		if ((*cp ^ *cp2) & *cp3)
			return (_B_FALSE);
	return (_B_TRUE);
}
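
/*
 * Find the leaf whose (key, mask) best matches the address v_arg: descend
 * to a leaf as rn_search() would, scan that leaf's duped-key chain, and
 * then climb back toward the root testing the masks attached to each
 * internal node along the way.
 */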
static struct radix_node *
rn_match(void *v_arg, struct radix_node_head *head)
{
	uint8_t *v = v_arg;
	struct radix_node *t = head->rnh_treetop, *x;
	uint8_t *cp = v, *cp2;
	uint8_t *cplim;
	struct radix_node *saved_t, *top = t;
	uint_t off = t->rn_off, vlen, matched_off;
	int test, b, rn_b;

	vlen = sizeof (struct sockaddr);

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}

	cp += off;
	cp2 = t->rn_key + off;
	cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto found_difference_with_key;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 * Or 255.255.255.255
	 *
	 * In this case, we have a complete match of the key.  Unless
	 * the node is one of the roots, we are finished.
	 * If it is the zeros root, then take what we have, preferring
	 * any real data.
	 * If it is the ones root, then pretend the target key was followed
	 * by a byte of zeros.
	 */
	if (!(t->rn_flags & RNF_ROOT))
		return (t);		/* not a root */
	if (t->rn_dupedkey) {
		t = t->rn_dupedkey;
		return (t);		/* have some real data */
	}
	if (*(cp-1) == 0)
		return (t);		/* not the ones root */
	b = 0;				/* fake a zero after 255.255.255.255 */
	goto calculated_differing_bit;
found_difference_with_key:
	test = (*cp ^ *cp2) & 0xff;	/* find first bit that differs */
	for (b = 7; (test >>= 1) > 0; )
		b--;
calculated_differing_bit:
	matched_off = cp - v;
	b += matched_off << 3;
	rn_b = -1 - b;
	/*
	 * If there is a host route in a duped-key chain, it will be first.
	 */
	if ((saved_t = t)->rn_mask == NULL)
		t = t->rn_dupedkey;
	for (; t; t = t->rn_dupedkey) {
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		if (t->rn_flags & RNF_NORMAL) {
			if (rn_b <= t->rn_b)
				return (t);
		} else if (rn_satisfies_leaf(v, t, matched_off)) {
			return (t);
		}
	}
	t = saved_t;
	/* start searching up the tree */
	do {
		struct radix_mask *m;
		t = t->rn_p;
		if ((m = t->rn_mklist) != NULL) {
			/*
			 * If non-contiguous masks ever become important
			 * we can restore the masking and open coding of
			 * the search and satisfaction test and put the
			 * calculation of "off" back before the "do".
			 */
			do {
				if (m->rm_flags & RNF_NORMAL) {
					if (rn_b <= m->rm_b)
						return (m->rm_leaf);
				} else {
					off = MIN(t->rn_off, matched_off);
					x = rn_search_m(v, t, m->rm_mask);
					while (x != NULL &&
					    x->rn_mask != m->rm_mask)
						x = x->rn_dupedkey;
					if (x != NULL &&
					    rn_satisfies_leaf(v, x, off))
						return (x);
				}
			} while ((m = m->rm_mklist) != NULL);
		}
	} while (t != top);
	return (NULL);
}

#ifdef RN_DEBUG
int rn_nodenum;
struct radix_node *rn_clist;
int rn_saveinfo;
boolean_t rn_debug = 1;
#endif
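
/*
 * Initialize the caller-supplied pair of nodes: nodes[0] becomes the leaf
 * holding key v and nodes[1] the internal node testing bit b, with the
 * leaf attached as the left child.  Returns the internal node.
 */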
static struct radix_node *
rn_newpair(void *v, uint_t b, struct radix_node nodes[2])
{
	struct radix_node *tt = nodes, *t = tt + 1;

	t->rn_b = b;
	t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt;
	t->rn_off = b >> 3;
	tt->rn_b = -1;
	tt->rn_key = v;
	tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++;
	t->rn_info = rn_nodenum++;
	tt->rn_twin = t;
	tt->rn_ybro = rn_clist;
	rn_clist = tt;
#endif
	return (t);
}

static struct radix_node *
rn_insert(void *v_arg, struct radix_node_head *head, boolean_t *dupentry,
    struct radix_node nodes[2])
{
	uint8_t *v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	uint_t head_off = top->rn_off, vlen;
	struct radix_node *t = rn_search(v_arg, top);
	uint8_t *cp = v + head_off, b;
	struct radix_node *tt;

	vlen = sizeof (struct sockaddr);

	/*
	 * Find first bit at which v and t->rn_key differ
	 */
	{
		uint8_t *cp2 = t->rn_key + head_off;
		uint8_t cmp_res;
		uint8_t *cplim = v + vlen;

		while (cp < cplim)
			if (*cp2++ != *cp++)
				goto found_differing_byte;
		/* handle adding 255.255.255.255 */
		if (!(t->rn_flags & RNF_ROOT) || *(cp2-1) == 0) {
			*dupentry = _B_TRUE;
			return (t);
		}
found_differing_byte:
		*dupentry = _B_FALSE;
		cmp_res = cp[-1] ^ cp2[-1];
		for (b = (cp - v) << 3; cmp_res != 0; b--)
			cmp_res >>= 1;
	}
	{
		struct radix_node *p, *x = top;
		cp = v;
		do {
			p = x;
			if (cp[x->rn_off] & x->rn_bmask)
				x = x->rn_r;
			else
				x = x->rn_l;
		} while (b > (unsigned)x->rn_b);
#ifdef RN_DEBUG
		if (rn_debug) {
			msglog("rn_insert: Going In:");
			traverse(p);
		}
#endif
		t = rn_newpair(v_arg, b, nodes);
		tt = t->rn_l;
		if (!(cp[p->rn_off] & p->rn_bmask))
			p->rn_l = t;
		else
			p->rn_r = t;
		x->rn_p = t;	/* frees x, p as temp vars below */
		t->rn_p = p;
		if (!(cp[t->rn_off] & t->rn_bmask)) {
			t->rn_r = x;
		} else {
			t->rn_r = tt;
			t->rn_l = x;
		}
#ifdef RN_DEBUG
		if (rn_debug) {
			msglog("rn_insert: Coming Out:");
			traverse(p);
		}
#endif
	}
	return (tt);
}

static struct radix_node *
rn_addmask(void *n_arg, uint_t search, uint_t skip)
{
	uint8_t *netmask = n_arg;
	struct radix_node *x;
	uint8_t *cp, *cplim;
	int b = 0, mlen, j, m0;
	boolean_t maskduplicated;
	struct radix_node *saved_x;
	static int last_zeroed = 0;

	mlen = sizeof (struct sockaddr);
	if (skip == 0)
		skip = 1;
	if (mlen <= skip)
		return (mask_rnhead->rnh_nodes);
	if (skip > 1)
		(void) memmove(addmask_key + 1, rn_ones + 1, skip - 1);
	if ((m0 = mlen) > skip)
		(void) memmove(addmask_key + skip, netmask + skip, mlen - skip);
	/*
	 * Trim trailing zeroes.
	 */
	for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0; )
		cp--;
	mlen = cp - addmask_key;
	if (mlen <= skip) {
		if (m0 >= last_zeroed)
			last_zeroed = mlen;
		return (mask_rnhead->rnh_nodes);
	}
	if (m0 < last_zeroed)
		(void) memset(addmask_key + m0, 0, last_zeroed - m0);
	*addmask_key = last_zeroed = mlen;
	x = rn_search(addmask_key, rn_masktop);
	if (memcmp(addmask_key, x->rn_key, mlen) != 0)
		x = NULL;
	if (x != NULL || search != 0)
		return (x);
	x = rtmalloc(max_keylen + 2*sizeof (*x), "rn_addmask");
	saved_x = x;
	(void) memset(x, 0, max_keylen + 2 * sizeof (*x));
	netmask = cp = (uint8_t *)(x + 2);
	(void) memmove(cp, addmask_key, mlen);
	x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
	if (maskduplicated) {
#ifdef DEBUG
		logbad(1, "rn_addmask: mask impossibly already in tree");
#else
		msglog("rn_addmask: mask impossibly already in tree");
#endif
		free(saved_x);
		return (x);
	}
	/*
	 * Calculate index of mask, and check for normalcy.
	 */
	cplim = netmask + mlen;
	x->rn_flags |= RNF_NORMAL;
	for (cp = netmask + skip; (cp < cplim) && *cp == 0xff; )
		cp++;
	if (cp != cplim) {
		for (j = 0x80; (j & *cp) != 0; j >>= 1)
			b++;
		if (*cp != (0xFF & ~(0xFF >> b)) || cp != (cplim - 1))
			x->rn_flags &= ~RNF_NORMAL;
	}
	b += (cp - netmask) << 3;
	x->rn_b = -1 - b;
	return (x);
}

static boolean_t	/* Note: arbitrary ordering for non-contiguous masks */
rn_lexobetter(void *m_arg, void *n_arg)
{
	uint8_t *mp = m_arg, *np = n_arg, *lim;

	lim = mp + sizeof (struct sockaddr);
	while (mp < lim)
		if (*mp++ > *np++)
			return (_B_TRUE);
	return (_B_FALSE);
}
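
/*
 * Allocate a radix_mask annotation describing leaf tt, chain 'next' behind
 * it, and attach it at the head of tt's mask list.  Returns NULL if the
 * allocation fails.
 */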
static struct radix_mask *
rn_new_radix_mask(struct radix_node *tt,
    struct radix_mask *next)
{
	struct radix_mask *m;

	MKGet(m);
	if (m == NULL) {
#ifdef DEBUG
		logbad(1, "Mask for route not entered");
#else
		msglog("Mask for route not entered");
#endif
		return (NULL);
	}
	(void) memset(m, 0, sizeof (*m));
	m->rm_b = tt->rn_b;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return (m);
}
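
/*
 * Add the route with key v_arg and mask n_arg to the tree under head,
 * using the caller-supplied treenodes[] for the new leaf and internal
 * node.  Returns the new leaf, or NULL if the mask cannot be entered or
 * an identical route is already present.
 */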
static struct radix_node *
rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node treenodes[2])
{
	uint8_t *v = v_arg, *netmask = n_arg;
	struct radix_node *t, *x = 0, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf = 0;
	boolean_t keyduplicated;
	uint8_t *mmask;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 0, top->rn_off)) == NULL) {
			DBGMSG(("rn_addroute: addmask failed"));
			return (NULL);
		}
		b_leaf = x->rn_b;
		b = -1 - x->rn_b;
		netmask = x->rn_key;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
			if (tt->rn_mask == netmask) {
				DBGMSG(("rn_addroute: duplicated route and "
				    "mask"));
				return (NULL);
			}
			if (netmask == NULL ||
			    (tt->rn_mask &&
			    ((b_leaf < tt->rn_b) ||
			    rn_refines(netmask, tt->rn_mask) ||
			    rn_lexobetter(netmask, tt->rn_mask))))
				break;
		}
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 */
		if (tt == saved_tt) {
			struct radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_p = x = t->rn_p;
			if (x->rn_l == t)
				x->rn_l = tt;
			else
				x->rn_r = tt;
			saved_tt = tt;
			x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
		}
#ifdef RN_DEBUG
		t = tt + 1;
		tt->rn_info = rn_nodenum++;
		t->rn_info = rn_nodenum++;
		tt->rn_twin = t;
		tt->rn_ybro = rn_clist;
		rn_clist = tt;
#endif
		tt->rn_key = v;
		tt->rn_b = -1;
		tt->rn_flags = RNF_ACTIVE;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
		tt->rn_flags |= x->rn_flags & RNF_NORMAL;
	}
	t = saved_tt->rn_p;
	if (keyduplicated)
		goto key_already_in_tree;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt)
		x = t->rn_l;
	else
		x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
			if (x->rn_mask != NULL && (x->rn_b >= b_leaf) &&
			    x->rn_mklist == NULL) {
				if ((*mp = m = rn_new_radix_mask(x, 0)) != NULL)
					mp = &m->rm_mklist;
			}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m;
		*mp = 0;
	}
key_already_in_tree:
	/* Add new route to highest possible ancestor's list */
	if ((netmask == NULL) || (b > t->rn_b)) {
		return (tt); /* can't lift at all */
	}
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * Need same criteria as when sorting dupedkeys to avoid
	 * double loop on deletion.
	 */
	for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_flags & RNF_NORMAL) {
			mmask = m->rm_leaf->rn_mask;
			if (tt->rn_flags & RNF_NORMAL) {
#ifdef DEBUG
				logbad(1, "Non-unique normal route, mask "
				    "not entered");
#else
				msglog("Non-unique normal route, mask "
				    "not entered");
#endif
				return (tt);
			}
		} else
			mmask = m->rm_mask;
		if (mmask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return (tt);
		}
		if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
			break;
	}
	*mp = rn_new_radix_mask(tt, *mp);
	return (tt);
}
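
/*
 * Remove the route with key v_arg and mask netmask_arg from the tree,
 * detaching any mask annotations that refer to it.  Returns the node pair
 * that may now be freed, or NULL if the route cannot be found or the
 * tree's annotations are inconsistent.
 */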
static struct radix_node *
rn_delete(void *v_arg, void *netmask_arg, struct radix_node_head *head)
{
	struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	uint8_t *v, *netmask;
	int b;
	uint_t head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_off;
	vlen = sizeof (struct sockaddr);
	saved_tt = tt;
	top = x;
	if (tt == NULL ||
	    memcmp(v + head_off, tt->rn_key + head_off, vlen - head_off) != 0) {
		DBGMSG(("rn_delete: unable to locate route to delete"));
		return (NULL);
	}
	/*
	 * Delete our route from mask lists.
	 */
	if (netmask) {
		if ((x = rn_addmask(netmask, 1, head_off)) == NULL) {
			DBGMSG(("rn_delete: cannot add mask"));
			return (NULL);
		}
		netmask = x->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == NULL) {
				DBGMSG(("rn_delete: cannot locate mask"));
				return (NULL);
			}
	}
	if (tt->rn_mask == NULL || (saved_m = m = tt->rn_mklist) == NULL)
		goto annotation_removed;
	if (tt->rn_flags & RNF_NORMAL) {
		if (m->rm_leaf != tt || m->rm_refs > 0) {
#ifdef DEBUG
			logbad(1, "rn_delete: inconsistent annotation");
#else
			msglog("rn_delete: inconsistent annotation");
#endif
			return (NULL);	/* dangling ref could cause disaster */
		}
	} else {
		if (m->rm_mask != tt->rn_mask) {
#ifdef DEBUG
			logbad(1, "rn_delete: inconsistent annotation");
#else
			msglog("rn_delete: inconsistent annotation");
#endif
			goto annotation_removed;
		}
		if (--m->rm_refs >= 0)
			goto annotation_removed;
	}
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto annotation_removed; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == NULL) {
#ifdef DEBUG
		logbad(1, "rn_delete: couldn't find our annotation");
#else
		msglog("rn_delete: couldn't find our annotation");
#endif
		if (tt->rn_flags & RNF_NORMAL)
			return (NULL); /* Dangling ref to us */
	}
annotation_removed:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT) {
		DBGMSG(("rn_delete: cannot delete root"));
		return (NULL);
	}
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t != NULL)
		t->rn_ybro = tt->rn_ybro;
#endif
	t = tt->rn_p;
	if ((dupedkey = saved_tt->rn_dupedkey) != NULL) {
		if (tt == saved_tt) {
			x = dupedkey;
			x->rn_p = t;
			if (t->rn_l == tt)
				t->rn_l = x;
			else
				t->rn_r = x;
		} else {
			for (x = p = saved_tt; p && p->rn_dupedkey != tt; )
				p = p->rn_dupedkey;
			if (p != NULL) {
				p->rn_dupedkey = tt->rn_dupedkey;
			} else {
#ifdef DEBUG
				logbad(1, "rn_delete: couldn't find us");
#else
				msglog("rn_delete: couldn't find us");
#endif
			}
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t;
			p = t->rn_p;
#else
			b = t->rn_info;
			*++x = *t;
			t->rn_info = b;
			p = t->rn_p;
#endif
			if (p->rn_l == t)
				p->rn_l = x;
			else
				p->rn_r = x;
			x->rn_l->rn_p = x;
			x->rn_r->rn_p = x;
		}
		goto out;
	}
	if (t->rn_l == tt)
		x = t->rn_r;
	else
		x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t)
		p->rn_r = x;
	else
		p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; (m = *mp) != NULL; )
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/*
			 * If there are any key,mask pairs in a sibling
			 * duped-key chain, some subset will appear sorted
			 * in the same order attached to our mklist
			 */
			for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
				if (m == x->rn_mklist) {
					struct radix_mask *mm = m->rm_mklist;
					x->rn_mklist = 0;
					if (--(m->rm_refs) < 0)
						MKFree(m);
					m = mm;
				}
			if (m != NULL) {
#ifdef DEBUG
				logbad(1, "rn_delete: Orphaned Mask %p at %p\n",
				    m, x);
#else
				msglog("rn_delete: Orphaned Mask %p at %p\n", m,
				    x);
#endif
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info;
		*t = *x;
		t->rn_info = b;
#endif
		t->rn_l->rn_p = t;
		t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x)
			p->rn_l = t;
		else
			p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

int
rn_walktree(struct radix_node_head *h,
    int (*f)(struct radix_node *, void *),
    void *w)
{
	int error;
	struct radix_node *base, *next;
	struct radix_node *rn = h->rnh_treetop;
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	do {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && !(rn->rn_flags & RNF_ROOT))
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0; )
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while ((rn = base) != NULL) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
	} while (!(rn->rn_flags & RNF_ROOT));
	return (0);
}
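
/*
 * Build an empty tree for keys beginning at byte offset 'off':
 * rnh_nodes[0] and rnh_nodes[2] become the left (all-zeros) and right
 * (all-ones) end leaves, rnh_nodes[1] the top internal node, and the
 * head's entry points are filled in.
 */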
int
rn_inithead(void **head, uint_t off)
{
	struct radix_node_head *rnh;
	struct radix_node *t, *tt, *ttt;

	if (*head)
		return (1);
	rnh = rtmalloc(sizeof (*rnh), "rn_inithead");
	(void) memset(rnh, 0, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_lookup = rn_lookup;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_treetop = t;
	return (1);
}

void
rn_init(void)
{
	uint8_t *cp, *cplim;

	if (max_keylen == 0) {
		logbad(1, "radix functions require max_keylen be set");
		return;
	}
	rn_zeros = rtmalloc(3 * max_keylen, "rn_init");
	(void) memset(rn_zeros, 0, 3 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	addmask_key = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = 0xFF;
	if (rn_inithead((void **)&mask_rnhead, 0) == 0) {
		logbad(0, "rn_init: could not initialize radix tree");
	}
}