/* workbench/network/stacks/AROSTCP/bsdsocket/net/radix.c (AROS.git) */
/*
 * Copyright (C) 1993 AmiTCP/IP Group, <amitcp-group@hut.fi>
 *                    Helsinki University of Technology, Finland.
 *                    All rights reserved.
 * Copyright (C) 2005 Neil Cafferkey
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */

/*
 * Copyright (c) 1988, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	7.9 (Berkeley) 2/4/91
 */
#include <conf.h>

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include <sys/param.h>
#include <net/radix.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#define M_DONTWAIT M_NOWAIT
#endif
#include <net/radix_protos.h>
#include <stdio.h>

struct radix_node_head *mask_rnhead = NULL;
#define rn_maskhead mask_rnhead->rnh_treetop
struct radix_mask *rn_mkfreelist = NULL;
struct radix_node_head *radix_node_head = NULL;
struct radix_node_head *rt_tables[AF_MAX+1] = {0};

#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical AND of the key and mask be the key.
 * We define the index of a route (associated with the mask) to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest-order bit).
 *
 * We say a mask is normal if every bit is 0 past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */
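
/*
 * Editorial example (not part of the original sources): treating a mask as a
 * bit string with bit 0 as its most significant bit, a mask whose leading 20
 * bits are ones (255.255.240.0 over a 4-byte address) has its first zero bit
 * at position 20, so a route carrying it has index 20.  A longer run of ones
 * gives a larger index, i.e. a more specific route.
 */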

/*
 * Return the leaf reached by following the bits of key v from (sub)tree head.
 */
struct radix_node *
rn_search(v, head)
	struct radix_node *head;
	register caddr_t v;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

/*
 * As rn_search(), but take the right branch only when the tested bit is set
 * in both the key v and the mask m.
 */
struct radix_node *
rn_search_m(v, head, m)
	struct radix_node *head;
	register caddr_t v, m;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

static int gotOddMasks = 0;
static char maskedKey[MAXKEYLEN] = {0};

/*
 * Find the leaf that best matches key v, trying an exact (host) match first
 * and then the stored netmasks.
 */
struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net.
			 */
			cp3 = matched_off + t->rn_mask;
			cp2 = matched_off + t->rn_key;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
			cp = matched_off + v;
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = MIN(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
}
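
/*
 * Worked example (editorial note, not part of the original sources): suppose
 * the tree holds a host route for 10.1.2.3, plus 10.1.0.0 with mask
 * 255.255.0.0 and 10.0.0.0 with mask 255.0.0.0.  A lookup of 10.1.2.3 is
 * satisfied by the exact-host comparison above.  A lookup of 10.1.9.9 fails
 * it; the key is then compared under the mask of the leaf it reached and, if
 * that also fails, the search backtracks toward the root, masking the key
 * with each mask hung on the ancestors and re-searching.  A mask is lifted
 * only as high as its index allows, so the more specific 255.255.0.0 sits
 * deeper in the tree and is tried before 255.0.0.0 during the bottom-up walk;
 * this is how the most specific route wins.
 */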

#ifdef RN_DEBUG
int rn_nodenum = 0;
struct radix_node *rn_clist = NULL;
int rn_saveinfo = 0;
#endif

/*
 * Initialize a (leaf, internal node) pair for key v and bit index b,
 * returning the internal node.
 */
struct radix_node *
rn_newpair(v, b, nodes)
	caddr_t v;
	int b;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

int rn_debug = 1;

/*
 * Insert key v into the tree, returning the new leaf; *dupentry is set if
 * the key was already present.
 */
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = head;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}

/*
 * Enter a netmask into the mask tree (or look it up if 'search' is set) and
 * record its index in rn_b (stored as -1 - index).
 */
struct radix_node *
rn_addmask(netmask, search, skip)
	caddr_t netmask;
	int search;
	int skip;
{
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b, mlen, j;
	int maskduplicated;

	mlen = *(u_char *)netmask;
	if (search) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0)
			return (x);
	}
	R_Malloc(x, struct radix_node *, MAXKEYLEN + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	Bzero(x, MAXKEYLEN + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);
	Bcopy(netmask, cp, mlen);
	netmask = cp;
	x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
	/*
	 * Calculate index of mask.
	 */
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim; cp++)
		if (*(u_char *)cp != 0xff)
			break;
	b = (cp - netmask) << 3;
	if (cp != cplim) {
		if (*cp != 0) {
			gotOddMasks = 1;
			for (j = 0x80; j; b++, j >>= 1)
				if ((j & *cp) == 0)
					break;
		}
	}
	x->rn_b = -1 - b;
	return (x);
}

/*
 * Add a route with destination key v and the given netmask, using the
 * caller-supplied pair of tree nodes.
 */
struct radix_node *
rn_addroute(v, netmask, head, treenodes)
	struct radix_node *head;
	caddr_t netmask, v;
	struct radix_node treenodes[2];
{
	register caddr_t cp;
	register struct radix_node *t, *x = NULL, *tt;
	short b = 0, b_leaf;
	int mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, head->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;
		b = -1 - x->rn_b;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);
			t = tt;
		} while (tt = tt->rn_dupedkey);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * XXX: we really ought to sort the masks
		 * for a duplicated key the same way as in a masklist.
		 * It is an unfortunate pain having to relocate
		 * the head of the list.
		 */
		t->rn_dupedkey = tt = treenodes;
#ifdef RN_DEBUG
		t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		maskp = (u_char *)m->rm_mask;
		for (cp = netmask; cp < cplim; cp++)
			if (*(u_char *)cp > *maskp++)
				goto on2;
	}
on2:
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}

/*
 * Remove the route with destination key v and the given netmask from the
 * tree, returning the deleted leaf (or 0 if it was not found).
 */
struct radix_node *
rn_delete(v, netmask, head)
	caddr_t v, netmask;
	struct radix_node *head;
{
	register struct radix_node *t, *p, *x = head;
	register struct radix_node *tt = rn_search(v, x);
	int b, head_off = x->rn_off, vlen = * (u_char *) v;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt = tt;

	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		if (netmask)
			netmask = rn_search(netmask, rn_maskhead)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif /* RN_DEBUG */
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
#ifndef RN_DEBUG
			x++; t = tt + 1; *x = *t; p = t->rn_p;
#else
			x++; b = x->rn_info; t = tt + 1; *x = *t; p = t->rn_p;
			x->rn_info = b;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		} else {
			for (p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %lx at %lx\n",
					    "rn_delete: Orphaned Mask",
					    (u_long)m, (u_long)x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

char rn_zeros[MAXKEYLEN] = {0}, rn_ones[MAXKEYLEN] = {0};

/*
 * Create and initialize a tree head for address family af, with keys
 * starting at bit offset 'off'.  The first call also sets up the shared
 * mask tree.
 */
int
rn_inithead(head, off, af)
	struct radix_node_head **head;
	int off;
	int af;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_af = af;
	rnh->rnh_treetop = t;
	if (radix_node_head == 0) {
		caddr_t cp = rn_ones, cplim = rn_ones + MAXKEYLEN;
		while (cp < cplim)
			*cp++ = -1;
		if (rn_inithead(&radix_node_head, 0, 0) == 0) {
			Free(rnh);
			*head = 0;
			return (0);
		}
		mask_rnhead = radix_node_head;
	}
	rnh->rnh_next = radix_node_head->rnh_next;
	if (radix_node_head != rnh)
		radix_node_head->rnh_next = rnh;
	rt_tables[af] = rnh;
	return (1);
}
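
/*
 * Editorial usage sketch (not part of the original file, kept compiled out):
 * a rough outline of how a protocol might drive this API.  The example_key
 * layout, the constants and the function below are illustrative assumptions,
 * not AROSTCP code; real callers hand in sockaddr-shaped keys whose first
 * byte is the key length, pre-mask the destination, and keep the key, mask
 * and node storage alive for as long as the route is in the tree.
 */
#if 0
struct example_key {
	u_char	len;		/* total key length, read via *(u_char *)v */
	u_char	family;		/* kept zero here so that (key & mask) == key */
	u_short	pad;
	u_long	addr;		/* IPv4 address, big-endian byte order assumed */
};

static struct radix_node_head *example_head;

static void
example_route_usage(void)
{
	/* 10.1.0.0/16, already masked, and a lookup key inside that net. */
	static struct example_key dst  = { sizeof (struct example_key), 0, 0, 0x0a010000 };
	static struct example_key mask = { sizeof (struct example_key), 0, 0, 0xffff0000 };
	static struct example_key key  = { sizeof (struct example_key), 0, 0, 0x0a010203 };
	static struct radix_node nodes[2];	/* normally embedded in a route entry */
	struct radix_node *rn;

	/* 32 is the bit offset of 'addr' within the key. */
	if (rn_inithead(&example_head, 32, AF_INET) == 0)
		return;
	rn_addroute((caddr_t)&dst, (caddr_t)&mask, example_head->rnh_treetop, nodes);
	rn = rn_match((caddr_t)&key, example_head->rnh_treetop);
	if (rn)
		printf("10.1.2.3 matched the 10.1.0.0/16 entry\n");
	rn_delete((caddr_t)&dst, (caddr_t)&mask, example_head->rnh_treetop);
}
#endif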