/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 * & Swedish University of Agricultural Sciences.
 *
 * Jens Laas <jens.laas@data.slu.se> Swedish University of
 * Agricultural Sciences.
 *
 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 * Version:	$Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work come from:
 *
 * David S. Miller, <davem@davemloft.net>
 * Stephen Hemminger <shemminger@osdl.org>
 * Paul E. McKenney <paulmck@us.ibm.com>
 * Patrick McHardy <kaber@trash.net>
 */
#define VERSION "0.407"

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"

#undef CONFIG_IP_FIB_TRIE_STATS
#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))
#define MASK_PFX(k, l) (((l)==0)?0:(k >> (KEYLENGTH-l)) << (KEYLENGTH-l))
#define TKEY_GET_MASK(offset, bits) (((bits)==0)?0:((t_key)(-1) << (KEYLENGTH - bits) >> offset))
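/*
 * Illustrative note (a worked example added for clarity; the address is made
 * up): with KEYLENGTH == 32, MASK_PFX(k, l) keeps the top 'l' bits of a key
 * and TKEY_GET_MASK(offset, bits) builds a mask of 'bits' ones starting at
 * 'offset' from the most significant bit.  For k == 0xC0A80102 (192.168.1.2):
 *
 *	MASK_PFX(k, 24)          == 0xC0A80100	the /24 prefix part
 *	TKEY_GET_MASK(24, 8)     == 0x000000FF	8 bits starting at offset 24
 *	k & TKEY_GET_MASK(24, 8) == 0x00000002	the bits below the /24 prefix
 */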
typedef unsigned int t_key;

#define NODE_TYPE_MASK 0x1UL
#define NODE_PARENT(node) \
	((struct tnode *)rcu_dereference(((node)->parent & ~NODE_TYPE_MASK)))

#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)

#define NODE_SET_PARENT(node, ptr) \
	rcu_assign_pointer((node)->parent, \
			   ((unsigned long)(ptr)) | NODE_TYPE(node))

#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)
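/*
 * Illustrative note (not from the original source): the parent "pointer" is
 * an unsigned long whose lowest bit stores the node type, which is why
 * IS_LEAF()/IS_TNODE() simply test that bit.  A minimal sketch of the effect
 * of the macros above, assuming a leaf 'l' and a tnode 'tn':
 *
 *	l->parent = T_LEAF;		l is tagged as a leaf
 *	NODE_SET_PARENT(l, tn);		parent becomes (unsigned long)tn | T_LEAF
 *	NODE_PARENT(l) == tn		the type bit is masked off again
 *	IS_LEAF(l), !IS_TNODE(l)	the tag survives reparenting
 *
 * This only works because the node structures are at least pointer-aligned,
 * so bit 0 of a real parent pointer is always zero.
 */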
struct node {
	t_key key;
	unsigned long parent;
};

struct leaf {
	t_key key;
	unsigned long parent;
	struct hlist_head list;
	struct rcu_head rcu;
};

struct leaf_info {
	struct hlist_node hlist;
	struct rcu_head rcu;
	int plen;
	struct list_head falh;
};

struct tnode {
	t_key key;
	unsigned long parent;
	unsigned short pos:5;		/* 2log(KEYLENGTH) bits needed */
	unsigned short bits:5;		/* 2log(KEYLENGTH) bits needed */
	unsigned short full_children;	/* KEYLENGTH bits needed */
	unsigned short empty_children;	/* KEYLENGTH bits needed */
	struct rcu_head rcu;
	struct node *child[0];
};
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct node *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats stats;
#endif
	int size;
	unsigned int revision;
};
static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
static struct node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
static void tnode_free(struct tnode *tn);

static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct trie *trie_local = NULL, *trie_main = NULL;
/* rcu_read_lock must be held by the caller on the read side */

static inline struct node *tnode_get_child(struct tnode *tn, int i)
{
	BUG_ON(i >= 1 << tn->bits);

	return rcu_dereference(tn->child[i]);
}

static inline int tnode_child_length(const struct tnode *tn)
{
	return 1 << tn->bits;
}

static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
{
	if (offset < KEYLENGTH)
		return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
	else
		return 0;
}

static inline int tkey_equals(t_key a, t_key b)
{
	return a == b;
}

static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
{
	if (bits == 0 || offset >= KEYLENGTH)
		return 1;
	bits = bits > KEYLENGTH ? KEYLENGTH : bits;
	return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
}

static inline int tkey_mismatch(t_key a, int offset, t_key b)
{
	t_key diff = a ^ b;
	int i = offset;

	if (!diff)
		return 0;
	while ((diff << i) >> (KEYLENGTH-1) == 0)
		i++;
	return i;
}
/*
  To understand this stuff, an understanding of keys and all their bits is
  necessary. Every node in the trie has a key associated with it, but not
  all of the bits in that key are significant.

  Consider a node 'n' and its parent 'tp'.

  If n is a leaf, every bit in its key is significant. Its presence is
  necessitated by path compression, since during a tree traversal (when
  searching for a leaf - unless we are doing an insertion) we will completely
  ignore all skipped bits we encounter. Thus we need to verify, at the end of
  a potentially successful search, that we have indeed been walking the
  correct key path.

  Note that we can never "miss" the correct key in the tree if present by
  following the wrong path. Path compression ensures that segments of the key
  that are the same for all keys with a given prefix are skipped, but the
  skipped part *is* identical for each node in the subtrie below the skipped
  bit! trie_insert() in this implementation takes care of that - note the
  call to tkey_sub_equals() in trie_insert().

  If n is an internal node - a 'tnode' here, the various parts of its key
  have many different meanings.

  Example:
  _________________________________________________________________
  | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
  -----------------------------------------------------------------
    0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15

  _________________________________________________________________
  | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
  -----------------------------------------------------------------
   16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31

  tp->pos = 7
  tp->bits = 3
  n->pos = 15
  n->bits = 4

  First, let's just ignore the bits that come before the parent tp, that is
  the bits from 0 to (tp->pos-1). They are *known* but at this point we do
  not use them for anything.

  The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
  index into the parent's child array. That is, they will be used to find
  'n' among tp's children.

  The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
  for the node n.

  All the bits we have seen so far are significant to the node n. The rest
  of the bits are really not needed or indeed known in n->key.

  The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
  n's child array, and will of course be different for each child.

  The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
  at this point.
*/
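/*
 * A small worked example (added for clarity; the key value is made up): with
 * the layout pictured above (tp->pos == 7, tp->bits == 3, n->pos == 15,
 * n->bits == 4) and a search key of 0xA5A5A5A5, a traversal would compute
 *
 *	tkey_extract_bits(0xA5A5A5A5, 7, 3)  == 6	the "N" bits: n's slot
 *							in tp's child array
 *	tkey_extract_bits(0xA5A5A5A5, 15, 4) == 13	the "C" bits: the slot
 *							to follow in n's child array
 *
 * while a check such as tkey_sub_equals(n->key, 10, 5, key) is what verifies
 * the skipped "S" bits (positions 10..14) against n->key.
 */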
static inline void check_tnode(const struct tnode *tn)
{
	WARN_ON(tn && tn->pos+tn->bits > 32);
}

static int halve_threshold = 25;
static int inflate_threshold = 50;
static int halve_threshold_root = 15;
static int inflate_threshold_root = 25;
static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

static void __leaf_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct leaf, rcu));
}

static void __leaf_info_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct leaf_info, rcu));
}

static inline void free_leaf_info(struct leaf_info *leaf)
{
	call_rcu(&leaf->rcu, __leaf_info_free_rcu);
}
static struct tnode *tnode_alloc(unsigned int size)
{
	struct page *pages;

	if (size <= PAGE_SIZE)
		return kcalloc(size, 1, GFP_KERNEL);

	pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
	if (!pages)
		return NULL;

	return page_address(pages);
}
static void __tnode_free_rcu(struct rcu_head *head)
{
	struct tnode *tn = container_of(head, struct tnode, rcu);
	unsigned int size = sizeof(struct tnode) +
		(1 << tn->bits) * sizeof(struct node *);

	if (size <= PAGE_SIZE)
		kfree(tn);
	else
		free_pages((unsigned long)tn, get_order(size));
}

static inline void tnode_free(struct tnode *tn)
{
	if (IS_LEAF(tn)) {
		struct leaf *l = (struct leaf *) tn;
		call_rcu_bh(&l->rcu, __leaf_free_rcu);
	} else
		call_rcu(&tn->rcu, __tnode_free_rcu);
}
static struct leaf *leaf_new(void)
{
	struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
	if (l) {
		l->parent = T_LEAF;
		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}

static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}
static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	int nchildren = 1<<bits;
	int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
	struct tnode *tn = tnode_alloc(sz);

	if (tn) {
		tn->parent = T_TNODE;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = key;
		tn->full_children = 0;
		tn->empty_children = 1<<bits;
	}

	pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
		 (unsigned int) (sizeof(struct node) * 1<<bits));
	return tn;
}
/*
 * Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(const struct tnode *tn, const struct node *n)
{
	if (n == NULL || IS_LEAF(n))
		return 0;

	return ((struct tnode *) n)->pos == tn->pos + tn->bits;
}

static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}
/*
 * Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */
static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
{
	struct node *chi = tn->child[i];
	int isfull;

	BUG_ON(i >= 1<<tn->bits);

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	if (n)
		NODE_SET_PARENT(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}
static struct node *resize(struct trie *t, struct tnode *tn)
{
	int i;
	struct tnode *old_tn;
	int inflate_threshold_use;
	int halve_threshold_use;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* No children */
	if (tn->empty_children == tnode_child_length(tn)) {
		tnode_free(tn);
		return NULL;
	}
	/* One child */
	if (tn->empty_children == tnode_child_length(tn) - 1)
		for (i = 0; i < tnode_child_length(tn); i++) {
			struct node *n;

			n = tn->child[i];
			if (!n)
				continue;

			/* compress one level */
			NODE_SET_PARENT(n, NULL);
			tnode_free(tn);
			return n;
		}
	/*
	 * Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 *
	 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
	 * the Helsinki University of Technology and Matti Tikkanen of Nokia
	 * Telecommunications, page 6:
	 * "A node is doubled if the ratio of non-empty children to all
	 * children in the *doubled* node is at least 'high'."
	 *
	 * 'high' in this instance is the variable 'inflate_threshold'. It
	 * is expressed as a percentage, so we multiply it by
	 * tnode_child_length() and instead of multiplying by 2 (since the
	 * child array will be doubled by inflate()) and multiplying
	 * the left-hand side by 100 (to handle the percentage thing) we
	 * multiply the left-hand side by 50.
	 *
	 * The left-hand side may look a bit weird: tnode_child_length(tn)
	 * - tn->empty_children is of course the number of non-null children
	 * in the current node. tn->full_children is the number of "full"
	 * children, that is non-null tnodes with a skip value of 0.
	 * All of those will be doubled in the resulting inflated tnode, so
	 * we just count them one extra time here.
	 *
	 * A clearer way to write this would be:
	 *
	 * to_be_doubled = tn->full_children;
	 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
	 *     tn->full_children;
	 *
	 * new_child_length = tnode_child_length(tn) * 2;
	 *
	 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
	 *     new_child_length;
	 * if (new_fill_factor >= inflate_threshold)
	 *     inflate();
	 *
	 * ...and so on, though it would mess up the while () loop.
	 *
	 * Rewriting the test without the division:
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
	 *     inflate_threshold
	 *
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
	 *     inflate_threshold * new_child_length
	 *
	 * expand not_to_be_doubled and to_be_doubled, and shorten:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *     tn->full_children) >= inflate_threshold * new_child_length
	 *
	 * expand new_child_length:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *     tn->full_children) >=
	 *     inflate_threshold * tnode_child_length(tn) * 2
	 *
	 * shorten again:
	 * 50 * (tn->full_children + tnode_child_length(tn) -
	 *     tn->empty_children) >= inflate_threshold *
	 *     tnode_child_length(tn)
	 */
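	/*
	 * Worked example (an illustration added for clarity, not part of the
	 * original comment; the numbers are made up): suppose tn->bits == 4,
	 * so tnode_child_length(tn) == 16, with empty_children == 4 and
	 * full_children == 6, and inflate_threshold == 50.  The test below
	 * computes
	 *
	 *	50 * (6 + 16 - 4) = 900  >=  50 * 16 = 800
	 *
	 * which corresponds to a fill factor of 100 * (12 + 6) / 32 (about
	 * 56%) in the doubled node, so this tnode would be inflated.  With
	 * full_children == 2 the left-hand side is only 700 < 800 and the
	 * node is left as it is.
	 */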
554 /* Keep root node larger */
557 inflate_threshold_use
= inflate_threshold_root
;
559 inflate_threshold_use
= inflate_threshold
;
562 while ((tn
->full_children
> 0 &&
563 50 * (tn
->full_children
+ tnode_child_length(tn
) - tn
->empty_children
) >=
564 inflate_threshold_use
* tnode_child_length(tn
))) {
570 #ifdef CONFIG_IP_FIB_TRIE_STATS
571 t
->stats
.resize_node_skipped
++;
580 * Halve as long as the number of empty children in this
581 * node is above threshold.
585 /* Keep root node larger */
588 halve_threshold_use
= halve_threshold_root
;
590 halve_threshold_use
= halve_threshold
;
593 while (tn
->bits
> 1 &&
594 100 * (tnode_child_length(tn
) - tn
->empty_children
) <
595 halve_threshold_use
* tnode_child_length(tn
)) {
601 #ifdef CONFIG_IP_FIB_TRIE_STATS
602 t
->stats
.resize_node_skipped
++;
609 /* Only one child remains */
610 if (tn
->empty_children
== tnode_child_length(tn
) - 1)
611 for (i
= 0; i
< tnode_child_length(tn
); i
++) {
618 /* compress one level */
620 NODE_SET_PARENT(n
, NULL
);
625 return (struct node
*) tn
;
628 static struct tnode
*inflate(struct trie
*t
, struct tnode
*tn
)
631 struct tnode
*oldtnode
= tn
;
632 int olen
= tnode_child_length(tn
);
635 pr_debug("In inflate\n");
637 tn
= tnode_new(oldtnode
->key
, oldtnode
->pos
, oldtnode
->bits
+ 1);
640 return ERR_PTR(-ENOMEM
);
	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the old node and the
	 * inflate of the tnode is ignored.
	 */
649 for (i
= 0; i
< olen
; i
++) {
650 struct tnode
*inode
= (struct tnode
*) tnode_get_child(oldtnode
, i
);
654 inode
->pos
== oldtnode
->pos
+ oldtnode
->bits
&&
656 struct tnode
*left
, *right
;
657 t_key m
= TKEY_GET_MASK(inode
->pos
, 1);
659 left
= tnode_new(inode
->key
&(~m
), inode
->pos
+ 1,
664 right
= tnode_new(inode
->key
|m
, inode
->pos
+ 1,
672 put_child(t
, tn
, 2*i
, (struct node
*) left
);
673 put_child(t
, tn
, 2*i
+1, (struct node
*) right
);
677 for (i
= 0; i
< olen
; i
++) {
678 struct node
*node
= tnode_get_child(oldtnode
, i
);
679 struct tnode
*left
, *right
;
686 /* A leaf or an internal node with skipped bits */
688 if (IS_LEAF(node
) || ((struct tnode
*) node
)->pos
>
689 tn
->pos
+ tn
->bits
- 1) {
690 if (tkey_extract_bits(node
->key
, oldtnode
->pos
+ oldtnode
->bits
,
692 put_child(t
, tn
, 2*i
, node
);
694 put_child(t
, tn
, 2*i
+1, node
);
698 /* An internal node with two children */
699 inode
= (struct tnode
*) node
;
701 if (inode
->bits
== 1) {
702 put_child(t
, tn
, 2*i
, inode
->child
[0]);
703 put_child(t
, tn
, 2*i
+1, inode
->child
[1]);
709 /* An internal node with more than two children */
		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */
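		/*
		 * Illustrative sketch of the key synthesis just described
		 * (the concrete values are made up): if inode->key == 0xC0A80000
		 * and inode->pos == 16, then m = TKEY_GET_MASK(16, 1) == 0x00008000,
		 * and the two replacement nodes were created earlier roughly as
		 *
		 *	left  = tnode_new(inode->key & ~m, inode->pos + 1, inode->bits - 1);
		 *	right = tnode_new(inode->key |  m, inode->pos + 1, inode->bits - 1);
		 *
		 * so left->key has a 0 and right->key a 1 in bit 16 (counting
		 * from the most significant bit), the first bit of inode's old
		 * child index.
		 */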
732 left
= (struct tnode
*) tnode_get_child(tn
, 2*i
);
733 put_child(t
, tn
, 2*i
, NULL
);
737 right
= (struct tnode
*) tnode_get_child(tn
, 2*i
+1);
738 put_child(t
, tn
, 2*i
+1, NULL
);
742 size
= tnode_child_length(left
);
743 for (j
= 0; j
< size
; j
++) {
744 put_child(t
, left
, j
, inode
->child
[j
]);
745 put_child(t
, right
, j
, inode
->child
[j
+ size
]);
747 put_child(t
, tn
, 2*i
, resize(t
, left
));
748 put_child(t
, tn
, 2*i
+1, resize(t
, right
));
752 tnode_free(oldtnode
);
756 int size
= tnode_child_length(tn
);
759 for (j
= 0; j
< size
; j
++)
761 tnode_free((struct tnode
*)tn
->child
[j
]);
765 return ERR_PTR(-ENOMEM
);
769 static struct tnode
*halve(struct trie
*t
, struct tnode
*tn
)
771 struct tnode
*oldtnode
= tn
;
772 struct node
*left
, *right
;
774 int olen
= tnode_child_length(tn
);
776 pr_debug("In halve\n");
778 tn
= tnode_new(oldtnode
->key
, oldtnode
->pos
, oldtnode
->bits
- 1);
781 return ERR_PTR(-ENOMEM
);
	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the old node and the
	 * halve of the tnode is ignored.
	 */
790 for (i
= 0; i
< olen
; i
+= 2) {
791 left
= tnode_get_child(oldtnode
, i
);
792 right
= tnode_get_child(oldtnode
, i
+1);
794 /* Two nonempty children */
798 newn
= tnode_new(left
->key
, tn
->pos
+ tn
->bits
, 1);
803 put_child(t
, tn
, i
/2, (struct node
*)newn
);
808 for (i
= 0; i
< olen
; i
+= 2) {
809 struct tnode
*newBinNode
;
811 left
= tnode_get_child(oldtnode
, i
);
812 right
= tnode_get_child(oldtnode
, i
+1);
814 /* At least one of the children is empty */
816 if (right
== NULL
) /* Both are empty */
818 put_child(t
, tn
, i
/2, right
);
823 put_child(t
, tn
, i
/2, left
);
827 /* Two nonempty children */
828 newBinNode
= (struct tnode
*) tnode_get_child(tn
, i
/2);
829 put_child(t
, tn
, i
/2, NULL
);
830 put_child(t
, newBinNode
, 0, left
);
831 put_child(t
, newBinNode
, 1, right
);
832 put_child(t
, tn
, i
/2, resize(t
, newBinNode
));
834 tnode_free(oldtnode
);
838 int size
= tnode_child_length(tn
);
841 for (j
= 0; j
< size
; j
++)
843 tnode_free((struct tnode
*)tn
->child
[j
]);
847 return ERR_PTR(-ENOMEM
);
851 static void trie_init(struct trie
*t
)
857 rcu_assign_pointer(t
->trie
, NULL
);
859 #ifdef CONFIG_IP_FIB_TRIE_STATS
860 memset(&t
->stats
, 0, sizeof(struct trie_use_stats
));
/* The read side must use rcu_read_lock; currently the dump routines
 * (via get_fa_head and dump) rely on this. */
867 static struct leaf_info
*find_leaf_info(struct leaf
*l
, int plen
)
869 struct hlist_head
*head
= &l
->list
;
870 struct hlist_node
*node
;
871 struct leaf_info
*li
;
873 hlist_for_each_entry_rcu(li
, node
, head
, hlist
)
874 if (li
->plen
== plen
)
880 static inline struct list_head
* get_fa_head(struct leaf
*l
, int plen
)
882 struct leaf_info
*li
= find_leaf_info(l
, plen
);
890 static void insert_leaf_info(struct hlist_head
*head
, struct leaf_info
*new)
892 struct leaf_info
*li
= NULL
, *last
= NULL
;
893 struct hlist_node
*node
;
895 if (hlist_empty(head
)) {
896 hlist_add_head_rcu(&new->hlist
, head
);
898 hlist_for_each_entry(li
, node
, head
, hlist
) {
899 if (new->plen
> li
->plen
)
905 hlist_add_after_rcu(&last
->hlist
, &new->hlist
);
907 hlist_add_before_rcu(&new->hlist
, &li
->hlist
);
/* rcu_read_lock must be held by the caller on the read side */
914 fib_find_node(struct trie
*t
, u32 key
)
921 n
= rcu_dereference(t
->trie
);
923 while (n
!= NULL
&& NODE_TYPE(n
) == T_TNODE
) {
924 tn
= (struct tnode
*) n
;
928 if (tkey_sub_equals(tn
->key
, pos
, tn
->pos
-pos
, key
)) {
929 pos
= tn
->pos
+ tn
->bits
;
930 n
= tnode_get_child(tn
, tkey_extract_bits(key
, tn
->pos
, tn
->bits
));
934 /* Case we have found a leaf. Compare prefixes */
936 if (n
!= NULL
&& IS_LEAF(n
) && tkey_equals(key
, n
->key
))
937 return (struct leaf
*)n
;
942 static struct node
*trie_rebalance(struct trie
*t
, struct tnode
*tn
)
946 struct tnode
*tp
= NULL
;
950 while (tn
!= NULL
&& NODE_PARENT(tn
) != NULL
) {
952 tp
= NODE_PARENT(tn
);
953 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
954 wasfull
= tnode_full(tp
, tnode_get_child(tp
, cindex
));
955 tn
= (struct tnode
*) resize (t
, (struct tnode
*)tn
);
956 tnode_put_child_reorg((struct tnode
*)tp
, cindex
,(struct node
*)tn
, wasfull
);
958 if (!NODE_PARENT(tn
))
961 tn
= NODE_PARENT(tn
);
963 /* Handle last (top) tnode */
965 tn
= (struct tnode
*) resize(t
, (struct tnode
*)tn
);
967 return (struct node
*) tn
;
970 /* only used from updater-side */
972 static struct list_head
*
973 fib_insert_node(struct trie
*t
, int *err
, u32 key
, int plen
)
976 struct tnode
*tp
= NULL
, *tn
= NULL
;
980 struct list_head
*fa_head
= NULL
;
981 struct leaf_info
*li
;
	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in it, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 * If we point to a T_TNODE, check if it matches our key. Note that
	 * a T_TNODE might be skipping any number of bits - its 'pos' need
	 * not be the parent's 'pos'+'bits'!
	 *
	 * If it does match the current key, get pos/bits from it, extract
	 * the index from our key, push the T_TNODE and walk the tree.
	 *
	 * If it doesn't, we have to replace it with a new T_TNODE.
	 *
	 * If we point to a T_LEAF, it might or might not have the same key
	 * as we do. If it does, just change the value, update the T_LEAF's
	 * value, and return it.
	 * If it doesn't, we need to replace it with a T_TNODE.
	 */
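	/*
	 * Illustrative sketch of the third case (the mismatch bit position
	 * 13 is made up): if the walk stops at a node n whose key first
	 * differs from the new key at bit 13, the code further down does,
	 * in effect,
	 *
	 *	newpos = tkey_mismatch(key, pos, n->key);	 == 13
	 *	tn = tnode_new(n->key, newpos, 1);		 new 1-bit branch point
	 *	missbit = tkey_extract_bits(key, newpos, 1);
	 *	put_child(t, tn, missbit, (struct node *)l);	 new leaf on one side
	 *	put_child(t, tn, 1-missbit, n);			 old subtree on the other
	 *
	 * i.e. a 1-bit tnode is spliced in at the first mismatching bit,
	 * holding the old node and the new leaf as its two children.
	 */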
1005 while (n
!= NULL
&& NODE_TYPE(n
) == T_TNODE
) {
1006 tn
= (struct tnode
*) n
;
1010 if (tkey_sub_equals(tn
->key
, pos
, tn
->pos
-pos
, key
)) {
1012 pos
= tn
->pos
+ tn
->bits
;
1013 n
= tnode_get_child(tn
, tkey_extract_bits(key
, tn
->pos
, tn
->bits
));
1015 BUG_ON(n
&& NODE_PARENT(n
) != tn
);
1021 * n ----> NULL, LEAF or TNODE
1023 * tp is n's (parent) ----> NULL or TNODE
1026 BUG_ON(tp
&& IS_LEAF(tp
));
1028 /* Case 1: n is a leaf. Compare prefixes */
1030 if (n
!= NULL
&& IS_LEAF(n
) && tkey_equals(key
, n
->key
)) {
1031 struct leaf
*l
= (struct leaf
*) n
;
1033 li
= leaf_info_new(plen
);
1040 fa_head
= &li
->falh
;
1041 insert_leaf_info(&l
->list
, li
);
1053 li
= leaf_info_new(plen
);
1056 tnode_free((struct tnode
*) l
);
1061 fa_head
= &li
->falh
;
1062 insert_leaf_info(&l
->list
, li
);
1064 if (t
->trie
&& n
== NULL
) {
1065 /* Case 2: n is NULL, and will just insert a new leaf */
1067 NODE_SET_PARENT(l
, tp
);
1069 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1070 put_child(t
, (struct tnode
*)tp
, cindex
, (struct node
*)l
);
	/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
	/*
	 *  Add a new tnode here;
	 *  the first tnode needs some special handling.
	 */
1079 pos
= tp
->pos
+tp
->bits
;
1084 newpos
= tkey_mismatch(key
, pos
, n
->key
);
1085 tn
= tnode_new(n
->key
, newpos
, 1);
1088 tn
= tnode_new(key
, newpos
, 1); /* First tnode */
1093 tnode_free((struct tnode
*) l
);
1098 NODE_SET_PARENT(tn
, tp
);
1100 missbit
= tkey_extract_bits(key
, newpos
, 1);
1101 put_child(t
, tn
, missbit
, (struct node
*)l
);
1102 put_child(t
, tn
, 1-missbit
, n
);
1105 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1106 put_child(t
, (struct tnode
*)tp
, cindex
, (struct node
*)tn
);
1108 rcu_assign_pointer(t
->trie
, (struct node
*)tn
); /* First tnode */
1113 if (tp
&& tp
->pos
+ tp
->bits
> 32)
1114 printk(KERN_WARNING
"fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1115 tp
, tp
->pos
, tp
->bits
, key
, plen
);
1117 /* Rebalance the trie */
1119 rcu_assign_pointer(t
->trie
, trie_rebalance(t
, tp
));
1127 * Caller must hold RTNL.
1129 static int fn_trie_insert(struct fib_table
*tb
, struct fib_config
*cfg
)
1131 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1132 struct fib_alias
*fa
, *new_fa
;
1133 struct list_head
*fa_head
= NULL
;
1134 struct fib_info
*fi
;
1135 int plen
= cfg
->fc_dst_len
;
1136 u8 tos
= cfg
->fc_tos
;
1144 key
= ntohl(cfg
->fc_dst
);
1146 pr_debug("Insert table=%u %08x/%d\n", tb
->tb_id
, key
, plen
);
1148 mask
= ntohl(inet_make_mask(plen
));
1155 fi
= fib_create_info(cfg
);
1161 l
= fib_find_node(t
, key
);
1165 fa_head
= get_fa_head(l
, plen
);
1166 fa
= fib_find_alias(fa_head
, tos
, fi
->fib_priority
);
	/* Now fa, if non-NULL, points to the first fib alias
	 * with the same keys [prefix,tos,priority], if such key already
	 * exists or to the node before which we will insert the new one.
	 *
	 * If fa is NULL, we will need to allocate a new one and
	 * insert to the head of f.
	 *
	 * If f is NULL, no fib node matched the destination key
	 * and we need to allocate a new one of those as well.
	 */
1180 if (fa
&& fa
->fa_info
->fib_priority
== fi
->fib_priority
) {
1181 struct fib_alias
*fa_orig
;
1184 if (cfg
->fc_nlflags
& NLM_F_EXCL
)
1187 if (cfg
->fc_nlflags
& NLM_F_REPLACE
) {
1188 struct fib_info
*fi_drop
;
1192 new_fa
= kmem_cache_alloc(fn_alias_kmem
, GFP_KERNEL
);
1196 fi_drop
= fa
->fa_info
;
1197 new_fa
->fa_tos
= fa
->fa_tos
;
1198 new_fa
->fa_info
= fi
;
1199 new_fa
->fa_type
= cfg
->fc_type
;
1200 new_fa
->fa_scope
= cfg
->fc_scope
;
1201 state
= fa
->fa_state
;
1202 new_fa
->fa_state
&= ~FA_S_ACCESSED
;
1204 list_replace_rcu(&fa
->fa_list
, &new_fa
->fa_list
);
1205 alias_free_mem_rcu(fa
);
1207 fib_release_info(fi_drop
);
1208 if (state
& FA_S_ACCESSED
)
1213 /* Error if we find a perfect match which
1214 * uses the same scope, type, and nexthop
1218 list_for_each_entry(fa
, fa_orig
->fa_list
.prev
, fa_list
) {
1219 if (fa
->fa_tos
!= tos
)
1221 if (fa
->fa_info
->fib_priority
!= fi
->fib_priority
)
1223 if (fa
->fa_type
== cfg
->fc_type
&&
1224 fa
->fa_scope
== cfg
->fc_scope
&&
1225 fa
->fa_info
== fi
) {
1229 if (!(cfg
->fc_nlflags
& NLM_F_APPEND
))
1233 if (!(cfg
->fc_nlflags
& NLM_F_CREATE
))
1237 new_fa
= kmem_cache_alloc(fn_alias_kmem
, GFP_KERNEL
);
1241 new_fa
->fa_info
= fi
;
1242 new_fa
->fa_tos
= tos
;
1243 new_fa
->fa_type
= cfg
->fc_type
;
1244 new_fa
->fa_scope
= cfg
->fc_scope
;
1245 new_fa
->fa_state
= 0;
1247 * Insert new entry to the list.
1252 fa_head
= fib_insert_node(t
, &err
, key
, plen
);
1254 goto out_free_new_fa
;
1257 list_add_tail_rcu(&new_fa
->fa_list
,
1258 (fa
? &fa
->fa_list
: fa_head
));
1261 rtmsg_fib(RTM_NEWROUTE
, htonl(key
), new_fa
, plen
, tb
->tb_id
,
1267 kmem_cache_free(fn_alias_kmem
, new_fa
);
1269 fib_release_info(fi
);
1275 /* should be called with rcu_read_lock */
1276 static inline int check_leaf(struct trie
*t
, struct leaf
*l
,
1277 t_key key
, int *plen
, const struct flowi
*flp
,
1278 struct fib_result
*res
)
1282 struct leaf_info
*li
;
1283 struct hlist_head
*hhead
= &l
->list
;
1284 struct hlist_node
*node
;
1286 hlist_for_each_entry_rcu(li
, node
, hhead
, hlist
) {
1288 mask
= inet_make_mask(i
);
1289 if (l
->key
!= (key
& ntohl(mask
)))
1292 if ((err
= fib_semantic_match(&li
->falh
, flp
, res
, htonl(l
->key
), mask
, i
)) <= 0) {
1294 #ifdef CONFIG_IP_FIB_TRIE_STATS
1295 t
->stats
.semantic_match_passed
++;
1299 #ifdef CONFIG_IP_FIB_TRIE_STATS
1300 t
->stats
.semantic_match_miss
++;
1307 fn_trie_lookup(struct fib_table
*tb
, const struct flowi
*flp
, struct fib_result
*res
)
1309 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1314 t_key key
= ntohl(flp
->fl4_dst
);
1317 int current_prefix_length
= KEYLENGTH
;
1319 t_key node_prefix
, key_prefix
, pref_mismatch
;
1324 n
= rcu_dereference(t
->trie
);
1328 #ifdef CONFIG_IP_FIB_TRIE_STATS
1334 if ((ret
= check_leaf(t
, (struct leaf
*)n
, key
, &plen
, flp
, res
)) <= 0)
1338 pn
= (struct tnode
*) n
;
1346 cindex
= tkey_extract_bits(MASK_PFX(key
, current_prefix_length
), pos
, bits
);
1348 n
= tnode_get_child(pn
, cindex
);
1351 #ifdef CONFIG_IP_FIB_TRIE_STATS
1352 t
->stats
.null_node_hit
++;
1358 if ((ret
= check_leaf(t
, (struct leaf
*)n
, key
, &plen
, flp
, res
)) <= 0)
1366 cn
= (struct tnode
*)n
;
		/*
		 * It's a tnode, and we can do some extra checks here if we
		 * like, to avoid descending into a dead-end branch.
		 * This tnode is in the parent's child array at index
		 * key[p_pos..p_pos+p_bits] but potentially with some bits
		 * chopped off, so in reality the index may be just a
		 * subprefix, padded with zero at the end.
		 * We can also take a look at any skipped bits in this
		 * tnode - everything up to p_pos is supposed to be ok,
		 * and the non-chopped bits of the index (see previous
		 * paragraph) are also guaranteed ok, but the rest is
		 * considered unknown.
		 *
		 * The skipped bits are key[pos+bits..cn->pos].
		 */

		/* If current_prefix_length < pos+bits, we are already doing
		 * actual prefix matching, which means everything from
		 * pos+(bits-chopped_off) onward must be zero along some
		 * branch of this subtree - otherwise there is *no* valid
		 * prefix present. Here we can only check the skipped
		 * bits. Remember, since we have already indexed into the
		 * parent's child array, we know that the bits we chopped off
		 * *are* zero.
		 */

		/* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1396 if (current_prefix_length
< pos
+bits
) {
1397 if (tkey_extract_bits(cn
->key
, current_prefix_length
,
1398 cn
->pos
- current_prefix_length
) != 0 ||
		/*
		 * If chopped_off=0, the index is fully validated and we
		 * only need to look at the skipped bits for this, the new,
		 * tnode. What we actually want to do is to find out if
		 * these skipped bits match our key perfectly, or if we will
		 * have to count on finding a matching prefix further down,
		 * because if we do, we would like to have some way of
		 * verifying the existence of such a prefix at this point.
		 */

		/* The only thing we can do at this point is to verify that
		 * any such matching prefix can indeed be a prefix to our
		 * key, and if the bits in the node we are inspecting that
		 * do not match our key are not ZERO, this cannot be true.
		 * Thus, find out where there is a mismatch (before cn->pos)
		 * and verify that all the mismatching bits are zero in the
		 * new tnode's key.
		 */

		/* Note: We aren't very concerned about the piece of the key
		 * that precedes pn->pos+pn->bits, since these bits have already
		 * been checked. The bits after cn->pos aren't checked since
		 * these are by definition "unknown" at this point. Thus, what
		 * we want to see is if we are about to enter the "prefix
		 * matching" state, and in that case verify that the skipped
		 * bits that will prevail throughout this subtree are zero, as
		 * they have to be if we are to find a matching prefix.
		 */
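		/*
		 * Worked example for this check (the addresses are made up):
		 * say cn->pos == 12, cn->key == 0x0A000000 and the search key
		 * is 0x0A400000.  Then
		 *
		 *	node_prefix   = MASK_PFX(cn->key, 12) == 0x0A000000
		 *	key_prefix    = MASK_PFX(key, 12)     == 0x0A400000
		 *	pref_mismatch = 0x00400000   (first mismatch at bit mp == 9)
		 *
		 * Because the bits of cn->key from position 9 up to cn->pos are
		 * all zero, a shorter prefix may still match, so the lookup goes
		 * on but clamps current_prefix_length down to 9 instead of
		 * backtracking right away.
		 */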
1432 node_prefix
= MASK_PFX(cn
->key
, cn
->pos
);
1433 key_prefix
= MASK_PFX(key
, cn
->pos
);
1434 pref_mismatch
= key_prefix
^node_prefix
;
		/* In short: If skipped bits in this node do not match the search
		 * key, enter the "prefix matching" state directly.
		 */
1440 if (pref_mismatch
) {
1441 while (!(pref_mismatch
& (1<<(KEYLENGTH
-1)))) {
1443 pref_mismatch
= pref_mismatch
<<1;
1445 key_prefix
= tkey_extract_bits(cn
->key
, mp
, cn
->pos
-mp
);
1447 if (key_prefix
!= 0)
1450 if (current_prefix_length
>= cn
->pos
)
1451 current_prefix_length
= mp
;
1454 pn
= (struct tnode
*)n
; /* Descend */
		/* Bits that are zero do not change the child key (cindex), so skip over them */
1462 while ((chopped_off
<= pn
->bits
) && !(cindex
& (1<<(chopped_off
-1))))
1465 /* Decrease current_... with bits chopped off */
1466 if (current_prefix_length
> pn
->pos
+ pn
->bits
- chopped_off
)
1467 current_prefix_length
= pn
->pos
+ pn
->bits
- chopped_off
;
		/*
		 * Either we do the actual chop-off, or, if we have chopped off
		 * all the bits in this tnode, walk up to our parent.
		 */
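		/*
		 * Illustrative sketch of the backtracking step (values made up):
		 * suppose pn->bits == 3 and the failed child index was
		 * cindex == 0b110.  chopped_off is advanced past the low-order
		 * zero bit (clearing an already-zero bit changes nothing), so
		 * the retries proceed as
		 *
		 *	cindex 0b110 -> 0b100	 first retry clears bit 1
		 *	cindex 0b100 -> 0b000	 next retry clears bit 2
		 *
		 * Once chopped_off exceeds pn->bits there is nothing left to
		 * chop in this tnode and the search pops up to NODE_PARENT(pn).
		 */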
1474 if (chopped_off
<= pn
->bits
) {
1475 cindex
&= ~(1 << (chopped_off
-1));
1477 if (NODE_PARENT(pn
) == NULL
)
1480 /* Get Child's index */
1481 cindex
= tkey_extract_bits(pn
->key
, NODE_PARENT(pn
)->pos
, NODE_PARENT(pn
)->bits
);
1482 pn
= NODE_PARENT(pn
);
1485 #ifdef CONFIG_IP_FIB_TRIE_STATS
1486 t
->stats
.backtrack
++;
1498 /* only called from updater side */
1499 static int trie_leaf_remove(struct trie
*t
, t_key key
)
1502 struct tnode
*tp
= NULL
;
1503 struct node
*n
= t
->trie
;
1506 pr_debug("entering trie_leaf_remove(%p)\n", n
);
	/* Note that in the case of skipped bits, those bits are *not* checked!
	 * When we finish this, we will have NULL or a T_LEAF, and the
	 * T_LEAF may or may not match our key.
	 */
1513 while (n
!= NULL
&& IS_TNODE(n
)) {
1514 struct tnode
*tn
= (struct tnode
*) n
;
1516 n
= tnode_get_child(tn
,tkey_extract_bits(key
, tn
->pos
, tn
->bits
));
1518 BUG_ON(n
&& NODE_PARENT(n
) != tn
);
1520 l
= (struct leaf
*) n
;
1522 if (!n
|| !tkey_equals(l
->key
, key
))
1527 * Remove the leaf and rebalance the tree
1533 tp
= NODE_PARENT(n
);
1534 tnode_free((struct tnode
*) n
);
1537 cindex
= tkey_extract_bits(key
, tp
->pos
, tp
->bits
);
1538 put_child(t
, (struct tnode
*)tp
, cindex
, NULL
);
1539 rcu_assign_pointer(t
->trie
, trie_rebalance(t
, tp
));
1541 rcu_assign_pointer(t
->trie
, NULL
);
1547 * Caller must hold RTNL.
1549 static int fn_trie_delete(struct fib_table
*tb
, struct fib_config
*cfg
)
1551 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1553 int plen
= cfg
->fc_dst_len
;
1554 u8 tos
= cfg
->fc_tos
;
1555 struct fib_alias
*fa
, *fa_to_delete
;
1556 struct list_head
*fa_head
;
1558 struct leaf_info
*li
;
1563 key
= ntohl(cfg
->fc_dst
);
1564 mask
= ntohl(inet_make_mask(plen
));
1570 l
= fib_find_node(t
, key
);
1575 fa_head
= get_fa_head(l
, plen
);
1576 fa
= fib_find_alias(fa_head
, tos
, 0);
1581 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key
, plen
, tos
, t
);
1583 fa_to_delete
= NULL
;
1584 fa_head
= fa
->fa_list
.prev
;
1586 list_for_each_entry(fa
, fa_head
, fa_list
) {
1587 struct fib_info
*fi
= fa
->fa_info
;
1589 if (fa
->fa_tos
!= tos
)
1592 if ((!cfg
->fc_type
|| fa
->fa_type
== cfg
->fc_type
) &&
1593 (cfg
->fc_scope
== RT_SCOPE_NOWHERE
||
1594 fa
->fa_scope
== cfg
->fc_scope
) &&
1595 (!cfg
->fc_protocol
||
1596 fi
->fib_protocol
== cfg
->fc_protocol
) &&
1597 fib_nh_match(cfg
, fi
) == 0) {
1607 rtmsg_fib(RTM_DELROUTE
, htonl(key
), fa
, plen
, tb
->tb_id
,
1610 l
= fib_find_node(t
, key
);
1611 li
= find_leaf_info(l
, plen
);
1613 list_del_rcu(&fa
->fa_list
);
1615 if (list_empty(fa_head
)) {
1616 hlist_del_rcu(&li
->hlist
);
1620 if (hlist_empty(&l
->list
))
1621 trie_leaf_remove(t
, key
);
1623 if (fa
->fa_state
& FA_S_ACCESSED
)
1626 fib_release_info(fa
->fa_info
);
1627 alias_free_mem_rcu(fa
);
1631 static int trie_flush_list(struct trie
*t
, struct list_head
*head
)
1633 struct fib_alias
*fa
, *fa_node
;
1636 list_for_each_entry_safe(fa
, fa_node
, head
, fa_list
) {
1637 struct fib_info
*fi
= fa
->fa_info
;
1639 if (fi
&& (fi
->fib_flags
& RTNH_F_DEAD
)) {
1640 list_del_rcu(&fa
->fa_list
);
1641 fib_release_info(fa
->fa_info
);
1642 alias_free_mem_rcu(fa
);
1649 static int trie_flush_leaf(struct trie
*t
, struct leaf
*l
)
1652 struct hlist_head
*lih
= &l
->list
;
1653 struct hlist_node
*node
, *tmp
;
1654 struct leaf_info
*li
= NULL
;
1656 hlist_for_each_entry_safe(li
, node
, tmp
, lih
, hlist
) {
1657 found
+= trie_flush_list(t
, &li
->falh
);
1659 if (list_empty(&li
->falh
)) {
1660 hlist_del_rcu(&li
->hlist
);
/* rcu_read_lock must be held by the caller on the read side */
1669 static struct leaf
*nextleaf(struct trie
*t
, struct leaf
*thisleaf
)
1671 struct node
*c
= (struct node
*) thisleaf
;
1674 struct node
*trie
= rcu_dereference(t
->trie
);
1680 if (IS_LEAF(trie
)) /* trie w. just a leaf */
1681 return (struct leaf
*) trie
;
1683 p
= (struct tnode
*) trie
; /* Start */
1685 p
= (struct tnode
*) NODE_PARENT(c
);
1690 /* Find the next child of the parent */
1692 pos
= 1 + tkey_extract_bits(c
->key
, p
->pos
, p
->bits
);
1696 last
= 1 << p
->bits
;
1697 for (idx
= pos
; idx
< last
; idx
++) {
1698 c
= rcu_dereference(p
->child
[idx
]);
		/* Descend if tnode */
1704 while (IS_TNODE(c
)) {
1705 p
= (struct tnode
*) c
;
1708 /* Rightmost non-NULL branch */
1709 if (p
&& IS_TNODE(p
))
1710 while (!(c
= rcu_dereference(p
->child
[idx
]))
1711 && idx
< (1<<p
->bits
)) idx
++;
1713 /* Done with this tnode? */
1714 if (idx
>= (1 << p
->bits
) || !c
)
1717 return (struct leaf
*) c
;
1720 /* No more children go up one step */
1721 c
= (struct node
*) p
;
1722 p
= (struct tnode
*) NODE_PARENT(p
);
1724 return NULL
; /* Ready. Root of trie */
1728 * Caller must hold RTNL.
1730 static int fn_trie_flush(struct fib_table
*tb
)
1732 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1733 struct leaf
*ll
= NULL
, *l
= NULL
;
1738 for (h
= 0; (l
= nextleaf(t
, l
)) != NULL
; h
++) {
1739 found
+= trie_flush_leaf(t
, l
);
1741 if (ll
&& hlist_empty(&ll
->list
))
1742 trie_leaf_remove(t
, ll
->key
);
1746 if (ll
&& hlist_empty(&ll
->list
))
1747 trie_leaf_remove(t
, ll
->key
);
1749 pr_debug("trie_flush found=%d\n", found
);
1753 static int trie_last_dflt
= -1;
1756 fn_trie_select_default(struct fib_table
*tb
, const struct flowi
*flp
, struct fib_result
*res
)
1758 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1759 int order
, last_idx
;
1760 struct fib_info
*fi
= NULL
;
1761 struct fib_info
*last_resort
;
1762 struct fib_alias
*fa
= NULL
;
1763 struct list_head
*fa_head
;
1772 l
= fib_find_node(t
, 0);
1776 fa_head
= get_fa_head(l
, 0);
1780 if (list_empty(fa_head
))
1783 list_for_each_entry_rcu(fa
, fa_head
, fa_list
) {
1784 struct fib_info
*next_fi
= fa
->fa_info
;
1786 if (fa
->fa_scope
!= res
->scope
||
1787 fa
->fa_type
!= RTN_UNICAST
)
1790 if (next_fi
->fib_priority
> res
->fi
->fib_priority
)
1792 if (!next_fi
->fib_nh
[0].nh_gw
||
1793 next_fi
->fib_nh
[0].nh_scope
!= RT_SCOPE_LINK
)
1795 fa
->fa_state
|= FA_S_ACCESSED
;
1798 if (next_fi
!= res
->fi
)
1800 } else if (!fib_detect_death(fi
, order
, &last_resort
,
1801 &last_idx
, &trie_last_dflt
)) {
1803 fib_info_put(res
->fi
);
1805 atomic_inc(&fi
->fib_clntref
);
1806 trie_last_dflt
= order
;
1812 if (order
<= 0 || fi
== NULL
) {
1813 trie_last_dflt
= -1;
1817 if (!fib_detect_death(fi
, order
, &last_resort
, &last_idx
, &trie_last_dflt
)) {
1819 fib_info_put(res
->fi
);
1821 atomic_inc(&fi
->fib_clntref
);
1822 trie_last_dflt
= order
;
1825 if (last_idx
>= 0) {
1827 fib_info_put(res
->fi
);
1828 res
->fi
= last_resort
;
1830 atomic_inc(&last_resort
->fib_clntref
);
1832 trie_last_dflt
= last_idx
;
1837 static int fn_trie_dump_fa(t_key key
, int plen
, struct list_head
*fah
, struct fib_table
*tb
,
1838 struct sk_buff
*skb
, struct netlink_callback
*cb
)
1841 struct fib_alias
*fa
;
1843 __be32 xkey
= htonl(key
);
	/* rcu_read_lock is held by the caller */
1850 list_for_each_entry_rcu(fa
, fah
, fa_list
) {
1855 BUG_ON(!fa
->fa_info
);
1857 if (fib_dump_info(skb
, NETLINK_CB(cb
->skb
).pid
,
1866 fa
->fa_info
, 0) < 0) {
1876 static int fn_trie_dump_plen(struct trie
*t
, int plen
, struct fib_table
*tb
, struct sk_buff
*skb
,
1877 struct netlink_callback
*cb
)
1880 struct list_head
*fa_head
;
1881 struct leaf
*l
= NULL
;
1885 for (h
= 0; (l
= nextleaf(t
, l
)) != NULL
; h
++) {
1889 memset(&cb
->args
[4], 0,
1890 sizeof(cb
->args
) - 4*sizeof(cb
->args
[0]));
1892 fa_head
= get_fa_head(l
, plen
);
1897 if (list_empty(fa_head
))
1900 if (fn_trie_dump_fa(l
->key
, plen
, fa_head
, tb
, skb
, cb
)<0) {
1909 static int fn_trie_dump(struct fib_table
*tb
, struct sk_buff
*skb
, struct netlink_callback
*cb
)
1912 struct trie
*t
= (struct trie
*) tb
->tb_data
;
1917 for (m
= 0; m
<= 32; m
++) {
1921 memset(&cb
->args
[3], 0,
1922 sizeof(cb
->args
) - 3*sizeof(cb
->args
[0]));
1924 if (fn_trie_dump_plen(t
, 32-m
, tb
, skb
, cb
)<0) {
1937 /* Fix more generic FIB names for init later */
1939 #ifdef CONFIG_IP_MULTIPLE_TABLES
1940 struct fib_table
* fib_hash_init(u32 id
)
1942 struct fib_table
* __init
fib_hash_init(u32 id
)
1945 struct fib_table
*tb
;
1948 if (fn_alias_kmem
== NULL
)
1949 fn_alias_kmem
= kmem_cache_create("ip_fib_alias",
1950 sizeof(struct fib_alias
),
1951 0, SLAB_HWCACHE_ALIGN
,
1954 tb
= kmalloc(sizeof(struct fib_table
) + sizeof(struct trie
),
1960 tb
->tb_lookup
= fn_trie_lookup
;
1961 tb
->tb_insert
= fn_trie_insert
;
1962 tb
->tb_delete
= fn_trie_delete
;
1963 tb
->tb_flush
= fn_trie_flush
;
1964 tb
->tb_select_default
= fn_trie_select_default
;
1965 tb
->tb_dump
= fn_trie_dump
;
1966 memset(tb
->tb_data
, 0, sizeof(struct trie
));
1968 t
= (struct trie
*) tb
->tb_data
;
1972 if (id
== RT_TABLE_LOCAL
)
1974 else if (id
== RT_TABLE_MAIN
)
1977 if (id
== RT_TABLE_LOCAL
)
1978 printk(KERN_INFO
"IPv4 FIB: Using LC-trie version %s\n", VERSION
);
1983 #ifdef CONFIG_PROC_FS
1984 /* Depth first Trie walk iterator */
1985 struct fib_trie_iter
{
1986 struct tnode
*tnode
;
1992 static struct node
*fib_trie_get_next(struct fib_trie_iter
*iter
)
1994 struct tnode
*tn
= iter
->tnode
;
1995 unsigned cindex
= iter
->index
;
1998 /* A single entry routing table */
2002 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2003 iter
->tnode
, iter
->index
, iter
->depth
);
2005 while (cindex
< (1<<tn
->bits
)) {
2006 struct node
*n
= tnode_get_child(tn
, cindex
);
2011 iter
->index
= cindex
+ 1;
2013 /* push down one level */
2014 iter
->tnode
= (struct tnode
*) n
;
2024 /* Current node exhausted, pop back up */
2025 p
= NODE_PARENT(tn
);
2027 cindex
= tkey_extract_bits(tn
->key
, p
->pos
, p
->bits
)+1;
2037 static struct node
*fib_trie_get_first(struct fib_trie_iter
*iter
,
2045 n
= rcu_dereference(t
->trie
);
2052 iter
->tnode
= (struct tnode
*) n
;
2067 static void trie_collect_stats(struct trie
*t
, struct trie_stat
*s
)
2070 struct fib_trie_iter iter
;
2072 memset(s
, 0, sizeof(*s
));
2075 for (n
= fib_trie_get_first(&iter
, t
); n
;
2076 n
= fib_trie_get_next(&iter
)) {
2079 s
->totdepth
+= iter
.depth
;
2080 if (iter
.depth
> s
->maxdepth
)
2081 s
->maxdepth
= iter
.depth
;
2083 const struct tnode
*tn
= (const struct tnode
*) n
;
2087 if(tn
->bits
< MAX_STAT_DEPTH
)
2088 s
->nodesizes
[tn
->bits
]++;
2090 for (i
= 0; i
< (1<<tn
->bits
); i
++)
2099 * This outputs /proc/net/fib_triestats
2101 static void trie_show_stats(struct seq_file
*seq
, struct trie_stat
*stat
)
2103 unsigned i
, max
, pointers
, bytes
, avdepth
;
2106 avdepth
= stat
->totdepth
*100 / stat
->leaves
;
2110 seq_printf(seq
, "\tAver depth: %d.%02d\n", avdepth
/ 100, avdepth
% 100 );
2111 seq_printf(seq
, "\tMax depth: %u\n", stat
->maxdepth
);
2113 seq_printf(seq
, "\tLeaves: %u\n", stat
->leaves
);
2115 bytes
= sizeof(struct leaf
) * stat
->leaves
;
2116 seq_printf(seq
, "\tInternal nodes: %d\n\t", stat
->tnodes
);
2117 bytes
+= sizeof(struct tnode
) * stat
->tnodes
;
2119 max
= MAX_STAT_DEPTH
;
2120 while (max
> 0 && stat
->nodesizes
[max
-1] == 0)
2124 for (i
= 1; i
<= max
; i
++)
2125 if (stat
->nodesizes
[i
] != 0) {
2126 seq_printf(seq
, " %d: %d", i
, stat
->nodesizes
[i
]);
2127 pointers
+= (1<<i
) * stat
->nodesizes
[i
];
2129 seq_putc(seq
, '\n');
2130 seq_printf(seq
, "\tPointers: %d\n", pointers
);
2132 bytes
+= sizeof(struct node
*) * pointers
;
2133 seq_printf(seq
, "Null ptrs: %d\n", stat
->nullpointers
);
2134 seq_printf(seq
, "Total size: %d kB\n", (bytes
+ 1023) / 1024);
2136 #ifdef CONFIG_IP_FIB_TRIE_STATS
2137 seq_printf(seq
, "Counters:\n---------\n");
2138 seq_printf(seq
,"gets = %d\n", t
->stats
.gets
);
2139 seq_printf(seq
,"backtracks = %d\n", t
->stats
.backtrack
);
2140 seq_printf(seq
,"semantic match passed = %d\n", t
->stats
.semantic_match_passed
);
2141 seq_printf(seq
,"semantic match miss = %d\n", t
->stats
.semantic_match_miss
);
2142 seq_printf(seq
,"null node hit= %d\n", t
->stats
.null_node_hit
);
2143 seq_printf(seq
,"skipped node resize = %d\n", t
->stats
.resize_node_skipped
);
2145 memset(&(t
->stats
), 0, sizeof(t
->stats
));
2147 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2150 static int fib_triestat_seq_show(struct seq_file
*seq
, void *v
)
2152 struct trie_stat
*stat
;
2154 stat
= kmalloc(sizeof(*stat
), GFP_KERNEL
);
2158 seq_printf(seq
, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2159 sizeof(struct leaf
), sizeof(struct tnode
));
2162 seq_printf(seq
, "Local:\n");
2163 trie_collect_stats(trie_local
, stat
);
2164 trie_show_stats(seq
, stat
);
2168 seq_printf(seq
, "Main:\n");
2169 trie_collect_stats(trie_main
, stat
);
2170 trie_show_stats(seq
, stat
);
2177 static int fib_triestat_seq_open(struct inode
*inode
, struct file
*file
)
2179 return single_open(file
, fib_triestat_seq_show
, NULL
);
2182 static const struct file_operations fib_triestat_fops
= {
2183 .owner
= THIS_MODULE
,
2184 .open
= fib_triestat_seq_open
,
2186 .llseek
= seq_lseek
,
2187 .release
= single_release
,
2190 static struct node
*fib_trie_get_idx(struct fib_trie_iter
*iter
,
2196 for (n
= fib_trie_get_first(iter
, trie_local
);
2197 n
; ++idx
, n
= fib_trie_get_next(iter
)) {
2202 for (n
= fib_trie_get_first(iter
, trie_main
);
2203 n
; ++idx
, n
= fib_trie_get_next(iter
)) {
2210 static void *fib_trie_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2214 return SEQ_START_TOKEN
;
2215 return fib_trie_get_idx(seq
->private, *pos
- 1);
2218 static void *fib_trie_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2220 struct fib_trie_iter
*iter
= seq
->private;
2224 if (v
== SEQ_START_TOKEN
)
2225 return fib_trie_get_idx(iter
, 0);
2227 v
= fib_trie_get_next(iter
);
2232 /* continue scan in next trie */
2233 if (iter
->trie
== trie_local
)
2234 return fib_trie_get_first(iter
, trie_main
);
2239 static void fib_trie_seq_stop(struct seq_file
*seq
, void *v
)
2244 static void seq_indent(struct seq_file
*seq
, int n
)
2246 while (n
-- > 0) seq_puts(seq
, " ");
2249 static inline const char *rtn_scope(enum rt_scope_t s
)
2251 static char buf
[32];
2254 case RT_SCOPE_UNIVERSE
: return "universe";
2255 case RT_SCOPE_SITE
: return "site";
2256 case RT_SCOPE_LINK
: return "link";
2257 case RT_SCOPE_HOST
: return "host";
2258 case RT_SCOPE_NOWHERE
: return "nowhere";
2260 snprintf(buf
, sizeof(buf
), "scope=%d", s
);
2265 static const char *rtn_type_names
[__RTN_MAX
] = {
2266 [RTN_UNSPEC
] = "UNSPEC",
2267 [RTN_UNICAST
] = "UNICAST",
2268 [RTN_LOCAL
] = "LOCAL",
2269 [RTN_BROADCAST
] = "BROADCAST",
2270 [RTN_ANYCAST
] = "ANYCAST",
2271 [RTN_MULTICAST
] = "MULTICAST",
2272 [RTN_BLACKHOLE
] = "BLACKHOLE",
2273 [RTN_UNREACHABLE
] = "UNREACHABLE",
2274 [RTN_PROHIBIT
] = "PROHIBIT",
2275 [RTN_THROW
] = "THROW",
2277 [RTN_XRESOLVE
] = "XRESOLVE",
2280 static inline const char *rtn_type(unsigned t
)
2282 static char buf
[32];
2284 if (t
< __RTN_MAX
&& rtn_type_names
[t
])
2285 return rtn_type_names
[t
];
2286 snprintf(buf
, sizeof(buf
), "type %d", t
);
2290 /* Pretty print the trie */
2291 static int fib_trie_seq_show(struct seq_file
*seq
, void *v
)
2293 const struct fib_trie_iter
*iter
= seq
->private;
2296 if (v
== SEQ_START_TOKEN
)
2299 if (!NODE_PARENT(n
)) {
2300 if (iter
->trie
== trie_local
)
2301 seq_puts(seq
, "<local>:\n");
2303 seq_puts(seq
, "<main>:\n");
2307 struct tnode
*tn
= (struct tnode
*) n
;
2308 __be32 prf
= htonl(MASK_PFX(tn
->key
, tn
->pos
));
2310 seq_indent(seq
, iter
->depth
-1);
2311 seq_printf(seq
, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2312 NIPQUAD(prf
), tn
->pos
, tn
->bits
, tn
->full_children
,
2313 tn
->empty_children
);
2316 struct leaf
*l
= (struct leaf
*) n
;
2318 __be32 val
= htonl(l
->key
);
2320 seq_indent(seq
, iter
->depth
);
2321 seq_printf(seq
, " |-- %d.%d.%d.%d\n", NIPQUAD(val
));
2322 for (i
= 32; i
>= 0; i
--) {
2323 struct leaf_info
*li
= find_leaf_info(l
, i
);
2325 struct fib_alias
*fa
;
2326 list_for_each_entry_rcu(fa
, &li
->falh
, fa_list
) {
2327 seq_indent(seq
, iter
->depth
+1);
2328 seq_printf(seq
, " /%d %s %s", i
,
2329 rtn_scope(fa
->fa_scope
),
2330 rtn_type(fa
->fa_type
));
2332 seq_printf(seq
, "tos =%d\n",
2334 seq_putc(seq
, '\n');
2343 static struct seq_operations fib_trie_seq_ops
= {
2344 .start
= fib_trie_seq_start
,
2345 .next
= fib_trie_seq_next
,
2346 .stop
= fib_trie_seq_stop
,
2347 .show
= fib_trie_seq_show
,
2350 static int fib_trie_seq_open(struct inode
*inode
, struct file
*file
)
2352 struct seq_file
*seq
;
2354 struct fib_trie_iter
*s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
2359 rc
= seq_open(file
, &fib_trie_seq_ops
);
2363 seq
= file
->private_data
;
2365 memset(s
, 0, sizeof(*s
));
2373 static const struct file_operations fib_trie_fops
= {
2374 .owner
= THIS_MODULE
,
2375 .open
= fib_trie_seq_open
,
2377 .llseek
= seq_lseek
,
2378 .release
= seq_release_private
,
2381 static unsigned fib_flag_trans(int type
, __be32 mask
, const struct fib_info
*fi
)
2383 static unsigned type2flags
[RTN_MAX
+ 1] = {
2384 [7] = RTF_REJECT
, [8] = RTF_REJECT
,
2386 unsigned flags
= type2flags
[type
];
2388 if (fi
&& fi
->fib_nh
->nh_gw
)
2389 flags
|= RTF_GATEWAY
;
2390 if (mask
== htonl(0xFFFFFFFF))
/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to be changed
 *	and needs to be the same as fib_hash output to avoid breaking
 *	legacy utilities.
 */
2402 static int fib_route_seq_show(struct seq_file
*seq
, void *v
)
2404 const struct fib_trie_iter
*iter
= seq
->private;
2409 if (v
== SEQ_START_TOKEN
) {
2410 seq_printf(seq
, "%-127s\n", "Iface\tDestination\tGateway "
2411 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2416 if (iter
->trie
== trie_local
)
2421 for (i
=32; i
>=0; i
--) {
2422 struct leaf_info
*li
= find_leaf_info(l
, i
);
2423 struct fib_alias
*fa
;
2424 __be32 mask
, prefix
;
2429 mask
= inet_make_mask(li
->plen
);
2430 prefix
= htonl(l
->key
);
2432 list_for_each_entry_rcu(fa
, &li
->falh
, fa_list
) {
2433 const struct fib_info
*fi
= fa
->fa_info
;
2434 unsigned flags
= fib_flag_trans(fa
->fa_type
, mask
, fi
);
2436 if (fa
->fa_type
== RTN_BROADCAST
2437 || fa
->fa_type
== RTN_MULTICAST
)
2441 snprintf(bf
, sizeof(bf
),
2442 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2443 fi
->fib_dev
? fi
->fib_dev
->name
: "*",
2445 fi
->fib_nh
->nh_gw
, flags
, 0, 0,
2448 (fi
->fib_advmss
? fi
->fib_advmss
+ 40 : 0),
2452 snprintf(bf
, sizeof(bf
),
2453 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2454 prefix
, 0, flags
, 0, 0, 0,
2457 seq_printf(seq
, "%-127s\n", bf
);
2464 static struct seq_operations fib_route_seq_ops
= {
2465 .start
= fib_trie_seq_start
,
2466 .next
= fib_trie_seq_next
,
2467 .stop
= fib_trie_seq_stop
,
2468 .show
= fib_route_seq_show
,
2471 static int fib_route_seq_open(struct inode
*inode
, struct file
*file
)
2473 struct seq_file
*seq
;
2475 struct fib_trie_iter
*s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
2480 rc
= seq_open(file
, &fib_route_seq_ops
);
2484 seq
= file
->private_data
;
2486 memset(s
, 0, sizeof(*s
));
2494 static const struct file_operations fib_route_fops
= {
2495 .owner
= THIS_MODULE
,
2496 .open
= fib_route_seq_open
,
2498 .llseek
= seq_lseek
,
2499 .release
= seq_release_private
,
2502 int __init
fib_proc_init(void)
2504 if (!proc_net_fops_create("fib_trie", S_IRUGO
, &fib_trie_fops
))
2507 if (!proc_net_fops_create("fib_triestat", S_IRUGO
, &fib_triestat_fops
))
2510 if (!proc_net_fops_create("route", S_IRUGO
, &fib_route_fops
))
2516 proc_net_remove("fib_triestat");
2518 proc_net_remove("fib_trie");
2523 void __init
fib_proc_exit(void)
2525 proc_net_remove("fib_trie");
2526 proc_net_remove("fib_triestat");
2527 proc_net_remove("route");
2530 #endif /* CONFIG_PROC_FS */