/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
 *   & Swedish University of Agricultural Sciences.
 *
 * Jens Laas <jens.laas@data.slu.se> Swedish University of
 *   Agricultural Sciences.
 *
 * Hans Liss <hans.liss@its.uu.se>  Uppsala Universitet
 *
 * This work is based on the LPC-trie which is originally described in:
 *
 * An experimental study of compression methods for dynamic tries
 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
 *
 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
 *
 * Version:	$Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
 *
 * Code from fib_hash has been reused which includes the following header:
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Substantial contributions to this work come from:
 *
 *		David S. Miller, <davem@davemloft.net>
 *		Stephen Hemminger <shemminger@osdl.org>
 *		Paul E. McKenney <paulmck@us.ibm.com>
 *		Patrick McHardy <kaber@trash.net>
 */
#define VERSION "0.408"

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/init.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include "fib_lookup.h"
#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

#define T_TNODE 0
#define T_LEAF  1
#define NODE_TYPE_MASK	0x1UL
#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)

#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)

struct node {
	unsigned long parent;
	t_key key;
};

struct leaf {
	unsigned long parent;
	t_key key;
	struct hlist_head list;
	struct rcu_head rcu;
};

struct leaf_info {
	struct hlist_node hlist;
	struct rcu_head rcu;
	int plen;
	struct list_head falh;
};

struct tnode {
	unsigned long parent;
	t_key key;
	unsigned char pos;		/* 2log(KEYLENGTH) bits needed */
	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
	unsigned int full_children;	/* KEYLENGTH bits needed */
	unsigned int empty_children;	/* KEYLENGTH bits needed */
	struct rcu_head rcu;
	struct node *child[0];
};
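/*
 * Illustrative note (not from the original source): because child[] is a
 * flexible array member, a tnode with 'bits' index bits is allocated as one
 * block of sizeof(struct tnode) + (1 << bits) * sizeof(struct node *) bytes.
 * For example, with bits = 5 there are 32 child slots, i.e. 256 bytes of
 * child pointers on a 64-bit machine on top of the fixed header.
 */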
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats {
	unsigned int gets;
	unsigned int backtrack;
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;
};
#endif

struct trie_stat {
	unsigned int totdepth;
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int nodesizes[MAX_STAT_DEPTH];
};

struct trie {
	struct node *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats stats;
#endif
	int size;
};
static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
static struct node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
static void tnode_free(struct tnode *tn);

static struct kmem_cache *fn_alias_kmem __read_mostly;
static inline struct tnode *node_parent(struct node *node)
{
	return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
}

static inline struct tnode *node_parent_rcu(struct node *node)
{
	struct tnode *ret = node_parent(node);

	return rcu_dereference(ret);
}

static inline void node_set_parent(struct node *node, struct tnode *ptr)
{
	rcu_assign_pointer(node->parent,
			   (unsigned long)ptr | NODE_TYPE(node));
}

static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
{
	BUG_ON(i >= 1U << tn->bits);

	return tn->child[i];
}

static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
	struct node *ret = tnode_get_child(tn, i);

	return rcu_dereference(ret);
}

static inline int tnode_child_length(const struct tnode *tn)
{
	return 1 << tn->bits;
}
static inline t_key mask_pfx(t_key k, unsigned short l)
{
	return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
}

static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
{
	if (offset < KEYLENGTH)
		return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
	else
		return 0;
}

static inline int tkey_equals(t_key a, t_key b)
{
	return a == b;
}

static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
{
	if (bits == 0 || offset >= KEYLENGTH)
		return 1;
	bits = bits > KEYLENGTH ? KEYLENGTH : bits;
	return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
}

static inline int tkey_mismatch(t_key a, int offset, t_key b)
{
	t_key diff = a ^ b;
	int i = offset;

	if (!diff)
		return 0;
	while ((diff << i) >> (KEYLENGTH-1) == 0)
		i++;
	return i;
}
/*
  To understand this stuff, an understanding of keys and all their bits is
  necessary. Every node in the trie has a key associated with it, but not
  all of the bits in that key are significant.

  Consider a node 'n' and its parent 'tp'.

  If n is a leaf, every bit in its key is significant. Its presence is
  necessitated by path compression, since during a tree traversal (when
  searching for a leaf - unless we are doing an insertion) we will completely
  ignore all skipped bits we encounter. Thus we need to verify, at the end of
  a potentially successful search, that we have indeed been walking the
  correct key path.

  Note that we can never "miss" the correct key in the tree if present by
  following the wrong path. Path compression ensures that segments of the key
  that are the same for all keys with a given prefix are skipped, but the
  skipped part *is* identical for each node in the subtrie below the skipped
  bit! trie_insert() in this implementation takes care of that - note the
  call to tkey_sub_equals() in trie_insert().

  if n is an internal node - a 'tnode' here, the various parts of its key
  have many different meanings.

  Example:
  _________________________________________________________________
  | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
  -----------------------------------------------------------------
    0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15

  _________________________________________________________________
  | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
  -----------------------------------------------------------------
   16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31

  tp->pos = 7
  tp->bits = 3
  n->pos = 15
  n->bits = 4

  First, let's just ignore the bits that come before the parent tp, that is
  the bits from 0 to (tp->pos-1). They are *known* but at this point we do
  not use them for anything.

  The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
  index into the parent's child array. That is, they will be used to find
  'n' among tp's children.

  The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
  for the node n.

  All the bits we have seen so far are significant to the node n. The rest
  of the bits are really not needed or indeed known in n->key.

  The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
  n's child array, and will of course be different for each child.

  The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
  at this point.
*/
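/*
 * Illustrative sketch (not part of the original code): with the layout
 * pictured above, the index of 'n' within tp->child[] and the index used
 * inside n itself can both be pulled out with tkey_extract_bits(). The key
 * value and the function below are made up purely to show the mechanics.
 */
#if 0	/* example only */
static void key_layout_example(void)
{
	t_key key = 0xA1B2C3D4;	/* hypothetical key */

	/* bits 7..9 ("N" above): index of n within tp->child[] */
	unsigned int n_index = tkey_extract_bits(key, 7, 3);

	/* bits 15..18 ("C" above): index into n's own child array */
	unsigned int c_index = tkey_extract_bits(key, 15, 4);

	pr_debug("N=%u C=%u\n", n_index, c_index);
}
#endif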
static inline void check_tnode(const struct tnode *tn)
{
	WARN_ON(tn && tn->pos+tn->bits > 32);
}

static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 8;
static const int inflate_threshold_root = 15;
static void __alias_free_mem(struct rcu_head *head)
{
	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
	kmem_cache_free(fn_alias_kmem, fa);
}

static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}

static void __leaf_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct leaf, rcu));
}

static void __leaf_info_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct leaf_info, rcu));
}

static inline void free_leaf_info(struct leaf_info *leaf)
{
	call_rcu(&leaf->rcu, __leaf_info_free_rcu);
}
static struct tnode *tnode_alloc(size_t size)
{
	struct page *pages;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);

	pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
	if (!pages)
		return NULL;

	return page_address(pages);
}

static void __tnode_free_rcu(struct rcu_head *head)
{
	struct tnode *tn = container_of(head, struct tnode, rcu);
	size_t size = sizeof(struct tnode) +
		      (sizeof(struct node *) << tn->bits);

	if (size <= PAGE_SIZE)
		kfree(tn);
	else
		free_pages((unsigned long)tn, get_order(size));
}
static inline void tnode_free(struct tnode *tn)
{
	if (IS_LEAF(tn)) {
		struct leaf *l = (struct leaf *) tn;
		call_rcu_bh(&l->rcu, __leaf_free_rcu);
	} else
		call_rcu(&tn->rcu, __tnode_free_rcu);
}
static struct leaf *leaf_new(void)
{
	struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
	if (l) {
		l->parent = T_LEAF;
		INIT_HLIST_HEAD(&l->list);
	}
	return l;
}

static struct leaf_info *leaf_info_new(int plen)
{
	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
	if (li) {
		li->plen = plen;
		INIT_LIST_HEAD(&li->falh);
	}
	return li;
}
static struct tnode *tnode_new(t_key key, int pos, int bits)
{
	size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
	struct tnode *tn = tnode_alloc(sz);

	if (tn) {
		tn->parent = T_TNODE;
		tn->pos = pos;
		tn->bits = bits;
		tn->key = key;
		tn->full_children = 0;
		tn->empty_children = 1<<bits;
	}

	pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
		 (unsigned long) (sizeof(struct node) << bits));
	return tn;
}
/*
 * Check whether a tnode 'n' is "full", i.e. it is an internal node
 * and no bits are skipped. See discussion in dyntree paper p. 6
 */
static inline int tnode_full(const struct tnode *tn, const struct node *n)
{
	if (n == NULL || IS_LEAF(n))
		return 0;

	return ((struct tnode *) n)->pos == tn->pos + tn->bits;
}

static inline void put_child(struct trie *t, struct tnode *tn, int i,
			     struct node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}
/*
 * Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 */
static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
				  int wasfull)
{
	struct node *chi = tn->child[i];
	int isfull;

	BUG_ON(i >= 1<<tn->bits);

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	if (n)
		node_set_parent(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}
static struct node *resize(struct trie *t, struct tnode *tn)
{
	int i;
	struct tnode *old_tn;
	int inflate_threshold_use;
	int halve_threshold_use;
	int max_resize;

	if (!tn)
		return NULL;

	pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
		 tn, inflate_threshold, halve_threshold);

	/* No children */
	if (tn->empty_children == tnode_child_length(tn)) {
		tnode_free(tn);
		return NULL;
	}
	/* One child */
	if (tn->empty_children == tnode_child_length(tn) - 1)
		for (i = 0; i < tnode_child_length(tn); i++) {
			struct node *n;

			n = tn->child[i];
			if (!n)
				continue;

			/* compress one level */
			node_set_parent(n, NULL);
			tnode_free(tn);
			return n;
		}
	/*
	 * Double as long as the resulting node has a number of
	 * nonempty nodes that are above the threshold.
	 *
	 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
	 * the Helsinki University of Technology and Matti Tikkanen of Nokia
	 * Telecommunications, page 6:
	 * "A node is doubled if the ratio of non-empty children to all
	 * children in the *doubled* node is at least 'high'."
	 *
	 * 'high' in this instance is the variable 'inflate_threshold'. It
	 * is expressed as a percentage, so we multiply it with
	 * tnode_child_length() and instead of multiplying by 2 (since the
	 * child array will be doubled by inflate()) and multiplying
	 * the left-hand side by 100 (to handle the percentage thing) we
	 * multiply the left-hand side by 50.
	 *
	 * The left-hand side may look a bit weird: tnode_child_length(tn)
	 * - tn->empty_children is of course the number of non-null children
	 * in the current node. tn->full_children is the number of "full"
	 * children, that is non-null tnodes with a skip value of 0.
	 * All of those will be doubled in the resulting inflated tnode, so
	 * we just count them one extra time here.
	 *
	 * A clearer way to write this would be:
	 *
	 * to_be_doubled = tn->full_children;
	 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
	 *     tn->full_children;
	 *
	 * new_child_length = tnode_child_length(tn) * 2;
	 *
	 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
	 *      new_child_length;
	 * if (new_fill_factor >= inflate_threshold)
	 *      ...
	 *
	 * ...and so on, though it would mess up the while () loop.
	 *
	 * anyway,
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
	 *      inflate_threshold
	 *
	 * avoid a division:
	 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
	 *      inflate_threshold * new_child_length
	 *
	 * expand not_to_be_doubled and to_be_doubled, and shorten:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >= inflate_threshold * new_child_length
	 *
	 * expand new_child_length:
	 * 100 * (tnode_child_length(tn) - tn->empty_children +
	 *    tn->full_children) >=
	 *      inflate_threshold * tnode_child_length(tn) * 2
	 *
	 * shorten again:
	 * 50 * (tn->full_children + tnode_child_length(tn) -
	 *    tn->empty_children) >= inflate_threshold *
	 *    tnode_child_length(tn)
	 */
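	/*
	 * Worked example (illustrative numbers only): for a tnode with
	 * bits = 4, tnode_child_length(tn) is 16. If empty_children = 4
	 * and full_children = 6, the left-hand side is
	 * 50 * (6 + 16 - 4) = 900 and the right-hand side is
	 * inflate_threshold * 16 = 50 * 16 = 800, so the node is doubled.
	 */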
	/* Keep root node larger */

	if (!tn->parent)
		inflate_threshold_use = inflate_threshold_root;
	else
		inflate_threshold_use = inflate_threshold;

	max_resize = 10;
	while ((tn->full_children > 0 && max_resize-- &&
		50 * (tn->full_children + tnode_child_length(tn) -
		      tn->empty_children) >=
		inflate_threshold_use * tnode_child_length(tn))) {
		old_tn = tn;
		tn = inflate(t, tn);
		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.resize_node_skipped++;
#endif
			break;
		}
	}

	if (max_resize < 0) {
		if (!tn->parent)
			printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
			       inflate_threshold_root, tn->bits);
		else
			printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
			       inflate_threshold, tn->bits);
	}
	/*
	 * Halve as long as the number of empty children in this
	 * node is above threshold.
	 */

	/* Keep root node larger */

	if (!tn->parent)
		halve_threshold_use = halve_threshold_root;
	else
		halve_threshold_use = halve_threshold;

	max_resize = 10;
	while (tn->bits > 1 && max_resize-- &&
	       100 * (tnode_child_length(tn) - tn->empty_children) <
	       halve_threshold_use * tnode_child_length(tn)) {

		old_tn = tn;
		tn = halve(t, tn);
		if (IS_ERR(tn)) {
			tn = old_tn;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.resize_node_skipped++;
#endif
			break;
		}
	}

	if (max_resize < 0) {
		if (!tn->parent)
			printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
			       halve_threshold_root, tn->bits);
		else
			printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
			       halve_threshold, tn->bits);
	}
	/* Only one child remains */
	if (tn->empty_children == tnode_child_length(tn) - 1)
		for (i = 0; i < tnode_child_length(tn); i++) {
			struct node *n;

			n = tn->child[i];
			if (!n)
				continue;

			/* compress one level */
			node_set_parent(n, NULL);
			tnode_free(tn);
			return n;
		}

	return (struct node *) tn;
}
static struct tnode *inflate(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	int olen = tnode_child_length(tn);
	int i;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and inflate
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i++) {
		struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);

		if (inode &&
		    IS_TNODE(inode) &&
		    inode->pos == oldtnode->pos + oldtnode->bits &&
		    inode->bits > 1) {
			struct tnode *left, *right;
			t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;

			left = tnode_new(inode->key&(~m), inode->pos + 1,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key|m, inode->pos + 1,
					  inode->bits - 1);
			if (!right) {
				tnode_free(left);
				goto nomem;
			}

			put_child(t, tn, 2*i, (struct node *) left);
			put_child(t, tn, 2*i+1, (struct node *) right);
		}
	}
	for (i = 0; i < olen; i++) {
		struct tnode *inode;
		struct node *node = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		int size, j;

		/* An empty child */
		if (node == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */

		if (IS_LEAF(node) || ((struct tnode *) node)->pos >
		    tn->pos + tn->bits - 1) {
			if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
					      1) == 0)
				put_child(t, tn, 2*i, node);
			else
				put_child(t, tn, 2*i+1, node);
			continue;
		}
		/* An internal node with two children */
		inode = (struct tnode *) node;

		if (inode->bits == 1) {
			put_child(t, tn, 2*i, inode->child[0]);
			put_child(t, tn, 2*i+1, inode->child[1]);

			tnode_free(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */

		/* Use the old key, but set the new significant
		 *   bit to zero.
		 */

		left = (struct tnode *) tnode_get_child(tn, 2*i);
		put_child(t, tn, 2*i, NULL);

		BUG_ON(!left);

		right = (struct tnode *) tnode_get_child(tn, 2*i+1);
		put_child(t, tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(t, left, j, inode->child[j]);
			put_child(t, right, j, inode->child[j + size]);
		}
		put_child(t, tn, 2*i, resize(t, left));
		put_child(t, tn, 2*i+1, resize(t, right));

		tnode_free(inode);
	}
	tnode_free(oldtnode);
	return tn;
nomem:
	{
		int size = tnode_child_length(tn);
		int j;

		for (j = 0; j < size; j++)
			if (tn->child[j])
				tnode_free((struct tnode *)tn->child[j]);

		tnode_free(tn);

		return ERR_PTR(-ENOMEM);
	}
}
static struct tnode *halve(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	struct node *left, *right;
	int i;
	int olen = tnode_child_length(tn);

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, tn->pos + tn->bits, 1);

			if (!newn)
				goto nomem;

			put_child(t, tn, i/2, (struct node *)newn);
		}
	}
	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(t, tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(t, tn, i/2, left);
			continue;
		}

		/* Two nonempty children */
		newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
		put_child(t, tn, i/2, NULL);
		put_child(t, newBinNode, 0, left);
		put_child(t, newBinNode, 1, right);
		put_child(t, tn, i/2, resize(t, newBinNode));
	}
	tnode_free(oldtnode);
	return tn;
nomem:
	{
		int size = tnode_child_length(tn);
		int j;

		for (j = 0; j < size; j++)
			if (tn->child[j])
				tnode_free((struct tnode *)tn->child[j]);

		tnode_free(tn);

		return ERR_PTR(-ENOMEM);
	}
}
/* readside must use rcu_read_lock currently dump routines
   via get_fa_head and dump */

static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
{
	struct hlist_head *head = &l->list;
	struct hlist_node *node;
	struct leaf_info *li;

	hlist_for_each_entry_rcu(li, node, head, hlist)
		if (li->plen == plen)
			return li;

	return NULL;
}

static inline struct list_head *get_fa_head(struct leaf *l, int plen)
{
	struct leaf_info *li = find_leaf_info(l, plen);

	if (!li)
		return NULL;

	return &li->falh;
}
static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;
	struct hlist_node *node;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		hlist_for_each_entry(li, node, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_after_rcu(&last->hlist, &new->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}
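/*
 * Illustrative note (not from the original source): the hlist above is kept
 * sorted by decreasing prefix length, so leaf_info entries for e.g. /24, /16
 * and /8 end up in that order and the most specific prefix is examined first
 * during the semantic match in check_leaf().
 */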
/* rcu_read_lock needs to be held by caller from readside */

static struct leaf *
fib_find_node(struct trie *t, u32 key)
{
	int pos;
	struct tnode *tn;
	struct node *n;

	pos = 0;
	n = rcu_dereference(t->trie);

	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			pos = tn->pos + tn->bits;
			n = tnode_get_child_rcu(tn, tkey_extract_bits(key, tn->pos, tn->bits));
		} else
			break;
	}
	/* Case we have found a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
		return (struct leaf *)n;

	return NULL;
}
static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key = tn->key;
	struct tnode *tp = NULL;

	while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = (struct tnode *) resize (t, (struct tnode *)tn);
		tnode_put_child_reorg((struct tnode *)tp, cindex, (struct node *)tn, wasfull);

		tp = node_parent((struct node *) tn);
		if (!tp)
			break;
		tn = tp;
	}

	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = (struct tnode *) resize(t, (struct tnode *)tn);

	return (struct node *) tn;
}
/* only used from updater-side */

static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
	int pos, newpos;
	struct tnode *tp = NULL, *tn = NULL;
	struct node *n;
	struct leaf *l;
	int missbit;
	struct list_head *fa_head = NULL;
	struct leaf_info *li;
	t_key cindex;

	pos = 0;
	n = t->trie;

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in it, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 * If we point to a T_TNODE, check if it matches our key. Note that
	 * a T_TNODE might be skipping any number of bits - its 'pos' need
	 * not be the parent's 'pos'+'bits'!
	 *
	 * If it does match the current key, get pos/bits from it, extract
	 * the index from our key, push the T_TNODE and walk the tree.
	 *
	 * If it doesn't, we have to replace it with a new T_TNODE.
	 *
	 * If we point to a T_LEAF, it might or might not have the same key
	 * as we do. If it does, just change the value, update the T_LEAF's
	 * value, and return it.
	 * If it doesn't, we need to replace it with a T_TNODE.
	 */
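	/*
	 * Illustrative example (values made up): inserting 10.0.0.0/8 into an
	 * empty trie takes the "first tnode"/new-leaf path below; inserting
	 * 10.0.0.0/24 afterwards walks to the existing leaf with the same key,
	 * so only a new leaf_info (plen 24) is chained onto it; inserting
	 * 10.1.0.0/16 then hits case 3 and a new tnode is created at the
	 * first mismatching bit.
	 */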
	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			tp = tn;
			pos = tn->pos + tn->bits;
			n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));

			BUG_ON(n && node_parent(n) != tn);
		} else
			break;
	}

	/*
	 * n  ----> NULL, LEAF or TNODE
	 *
	 * tp is n's (parent) ----> NULL or TNODE
	 */

	BUG_ON(tp && IS_LEAF(tp));
	/* Case 1: n is a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
		l = (struct leaf *) n;
		li = leaf_info_new(plen);

		if (!li)
			return NULL;

		fa_head = &li->falh;
		insert_leaf_info(&l->list, li);
		goto done;
	}
	t->size++;
	l = leaf_new();

	if (!l)
		return NULL;

	l->key = key;
	li = leaf_info_new(plen);

	if (!li) {
		tnode_free((struct tnode *) l);
		return NULL;
	}

	fa_head = &li->falh;
	insert_leaf_info(&l->list, li);
	if (t->trie && n == NULL) {
		/* Case 2: n is NULL, and will just insert a new leaf */

		node_set_parent((struct node *)l, tp);

		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
	} else {
		/*
		 *  Case 3: n is a LEAF or a TNODE and the key doesn't match.
		 *
		 *  Add a new tnode here
		 *  first tnode need some special handling
		 */

		if (tp)
			pos = tp->pos+tp->bits;
		else
			pos = 0;

		if (n) {
			newpos = tkey_mismatch(key, pos, n->key);
			tn = tnode_new(n->key, newpos, 1);
		} else {
			newpos = 0;
			tn = tnode_new(key, newpos, 1); /* First tnode */
		}

		if (!tn) {
			free_leaf_info(li);
			tnode_free((struct tnode *) l);
			return NULL;
		}

		node_set_parent((struct node *)tn, tp);

		missbit = tkey_extract_bits(key, newpos, 1);
		put_child(t, tn, missbit, (struct node *)l);
		put_child(t, tn, 1-missbit, n);

		if (tp) {
			cindex = tkey_extract_bits(key, tp->pos, tp->bits);
			put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
		} else {
			rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
			tp = tn;
		}
	}
	if (tp && tp->pos + tp->bits > 32)
		printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
		       tp, tp->pos, tp->bits, key, plen);

	/* Rebalance the trie */

	rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
done:
	return fa_head;
}
/*
 * Caller must hold RTNL.
 */
static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
{
	struct trie *t = (struct trie *) tb->tb_data;
	struct fib_alias *fa, *new_fa;
	struct list_head *fa_head = NULL;
	struct fib_info *fi;
	int plen = cfg->fc_dst_len;
	u8 tos = cfg->fc_tos;
	u32 key, mask;
	int err;
	struct leaf *l;

	if (plen > 32)
		return -EINVAL;

	key = ntohl(cfg->fc_dst);

	pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);

	mask = ntohl(inet_make_mask(plen));

	if (key & ~mask)
		return -EINVAL;

	key = key & mask;

	fi = fib_create_info(cfg);
	if (IS_ERR(fi)) {
		err = PTR_ERR(fi);
		goto err;
	}

	l = fib_find_node(t, key);
	fa = NULL;

	if (l) {
		fa_head = get_fa_head(l, plen);
		fa = fib_find_alias(fa_head, tos, fi->fib_priority);
	}
1176 * with the same keys [prefix,tos,priority], if such key already
1177 * exists or to the node before which we will insert new one.
1179 * If fa is NULL, we will need to allocate a new one and
1180 * insert to the head of f.
1182 * If f is NULL, no fib node matched the destination key
1183 * and we need to allocate a new one of those as well.
1186 if (fa
&& fa
->fa_info
->fib_priority
== fi
->fib_priority
) {
1187 struct fib_alias
*fa_orig
;
1190 if (cfg
->fc_nlflags
& NLM_F_EXCL
)
1193 if (cfg
->fc_nlflags
& NLM_F_REPLACE
) {
1194 struct fib_info
*fi_drop
;
1197 if (fi
->fib_treeref
> 1)
1201 new_fa
= kmem_cache_alloc(fn_alias_kmem
, GFP_KERNEL
);
1205 fi_drop
= fa
->fa_info
;
1206 new_fa
->fa_tos
= fa
->fa_tos
;
1207 new_fa
->fa_info
= fi
;
1208 new_fa
->fa_type
= cfg
->fc_type
;
1209 new_fa
->fa_scope
= cfg
->fc_scope
;
1210 state
= fa
->fa_state
;
1211 new_fa
->fa_state
&= ~FA_S_ACCESSED
;
1213 list_replace_rcu(&fa
->fa_list
, &new_fa
->fa_list
);
1214 alias_free_mem_rcu(fa
);
1216 fib_release_info(fi_drop
);
1217 if (state
& FA_S_ACCESSED
)
1219 rtmsg_fib(RTM_NEWROUTE
, htonl(key
), new_fa
, plen
,
1220 tb
->tb_id
, &cfg
->fc_nlinfo
, NLM_F_REPLACE
);
		/* Error if we find a perfect match which
		 * uses the same scope, type, and nexthop
		 * information.
		 */
		fa_orig = fa;
		list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
			if (fa->fa_tos != tos)
				break;
			if (fa->fa_info->fib_priority != fi->fib_priority)
				break;
			if (fa->fa_type == cfg->fc_type &&
			    fa->fa_scope == cfg->fc_scope &&
			    fa->fa_info == fi) {
				goto out;
			}
		}
		if (!(cfg->fc_nlflags & NLM_F_APPEND))
			fa = fa_orig;
	}
	err = -ENOENT;
	if (!(cfg->fc_nlflags & NLM_F_CREATE))
		goto out;
	err = -ENOBUFS;
	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
	if (new_fa == NULL)
		goto out;

	new_fa->fa_info = fi;
	new_fa->fa_tos = tos;
	new_fa->fa_type = cfg->fc_type;
	new_fa->fa_scope = cfg->fc_scope;
	new_fa->fa_state = 0;
	/*
	 * Insert new entry to the list.
	 */

	if (!fa_head) {
		fa_head = fib_insert_node(t, key, plen);
		if (unlikely(!fa_head)) {
			err = -ENOMEM;
			goto out_free_new_fa;
		}
	}

	list_add_tail_rcu(&new_fa->fa_list,
			  (fa ? &fa->fa_list : fa_head));

	rt_cache_flush(-1);
	rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
		  &cfg->fc_nlinfo, 0);
succeeded:
	return 0;

out_free_new_fa:
	kmem_cache_free(fn_alias_kmem, new_fa);
out:
	fib_release_info(fi);
err:
	return err;
}
/* should be called with rcu_read_lock */
static inline int check_leaf(struct trie *t, struct leaf *l,
			     t_key key, int *plen, const struct flowi *flp,
			     struct fib_result *res)
{
	int err, i;
	__be32 mask;
	struct leaf_info *li;
	struct hlist_head *hhead = &l->list;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(li, node, hhead, hlist) {
		i = li->plen;
		mask = inet_make_mask(i);
		if (l->key != (key & ntohl(mask)))
			continue;

		if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
			*plen = i;
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.semantic_match_passed++;
#endif
			return err;
		}
#ifdef CONFIG_IP_FIB_TRIE_STATS
		t->stats.semantic_match_miss++;
#endif
	}
	return 1;
}
static int
fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
{
	struct trie *t = (struct trie *) tb->tb_data;
	int plen, ret = 0;
	struct node *n;
	struct tnode *pn;
	int pos, bits;
	t_key key = ntohl(flp->fl4_dst);
	int chopped_off;
	t_key cindex = 0;
	int current_prefix_length = KEYLENGTH;
	struct tnode *cn;
	t_key node_prefix, key_prefix, pref_mismatch;
	int mp;

	rcu_read_lock();

	n = rcu_dereference(t->trie);
	if (!n)
		goto failed;

#ifdef CONFIG_IP_FIB_TRIE_STATS
	t->stats.gets++;
#endif

	/* Just a leaf? */
	if (IS_LEAF(n)) {
		if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
			goto found;
		goto failed;
	}

	pn = (struct tnode *) n;
	chopped_off = 0;
	while (pn) {
		pos = pn->pos;
		bits = pn->bits;

		if (!chopped_off)
			cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
						   pos, bits);

		n = tnode_get_child(pn, cindex);

		if (n == NULL) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.null_node_hit++;
#endif
			goto backtrace;
		}

		if (IS_LEAF(n)) {
			if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
				goto found;
			else
				goto backtrace;
		}

		cn = (struct tnode *)n;
		/*
		 * It's a tnode, and we can do some extra checks here if we
		 * like, to avoid descending into a dead-end branch.
		 * This tnode is in the parent's child array at index
		 * key[p_pos..p_pos+p_bits] but potentially with some bits
		 * chopped off, so in reality the index may be just a
		 * subprefix, padded with zero at the end.
		 * We can also take a look at any skipped bits in this
		 * tnode - everything up to p_pos is supposed to be ok,
		 * and the non-chopped bits of the index (see previous
		 * paragraph) are also guaranteed ok, but the rest is
		 * considered unknown.
		 *
		 * The skipped bits are key[pos+bits..cn->pos].
		 */

		/* If current_prefix_length < pos+bits, we are already doing
		 * actual prefix matching, which means everything from
		 * pos+(bits-chopped_off) onward must be zero along some
		 * branch of this subtree - otherwise there is *no* valid
		 * prefix present. Here we can only check the skipped
		 * bits. Remember, since we have already indexed into the
		 * parent's child array, we know that the bits we chopped off
		 * *are* zero.
		 */

		/* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */

		if (current_prefix_length < pos+bits) {
			if (tkey_extract_bits(cn->key, current_prefix_length,
					      cn->pos - current_prefix_length) != 0 ||
			    !(cn->child[0]))
				goto backtrace;
		}

		/*
		 * If chopped_off=0, the index is fully validated and we
		 * only need to look at the skipped bits for this, the new,
		 * tnode. What we actually want to do is to find out if
		 * these skipped bits match our key perfectly, or if we will
		 * have to count on finding a matching prefix further down,
		 * because if we do, we would like to have some way of
		 * verifying the existence of such a prefix at this point.
		 */

		/* The only thing we can do at this point is to verify that
		 * any such matching prefix can indeed be a prefix to our
		 * key, and if the bits in the node we are inspecting that
		 * do not match our key are not ZERO, this cannot be true.
		 * Thus, find out where there is a mismatch (before cn->pos)
		 * and verify that all the mismatching bits are zero in the
		 * new tnode's key.
		 */

		/* Note: We aren't very concerned about the piece of the key
		 * that precedes pn->pos+pn->bits, since these have already been
		 * checked. The bits after cn->pos aren't checked since these are
		 * by definition "unknown" at this point. Thus, what we want to
		 * see is if we are about to enter the "prefix matching" state,
		 * and in that case verify that the skipped bits that will prevail
		 * throughout this subtree are zero, as they have to be if we are
		 * to find a matching prefix.
		 */
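		/*
		 * Illustrative example (made-up values): with cn->pos = 12 and
		 * cn->key = 0x0A000000, mask_pfx() keeps only the top 12 bits
		 * of both cn->key and the search key. If the search key is
		 * 0x0A100000, the XOR of the two prefixes is 0x00100000, i.e.
		 * a mismatch at bit 11, so we can only proceed in "prefix
		 * matching" mode from that bit on.
		 */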
		node_prefix = mask_pfx(cn->key, cn->pos);
		key_prefix = mask_pfx(key, cn->pos);
		pref_mismatch = key_prefix^node_prefix;
		mp = 0;

		/*
		 * In short: If skipped bits in this node do not match the
		 * search key, enter the "prefix matching" state directly.
		 */
		if (pref_mismatch) {
			while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
				mp++;
				pref_mismatch = pref_mismatch <<1;
			}
			key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);

			if (key_prefix != 0)
				goto backtrace;

			if (current_prefix_length >= cn->pos)
				current_prefix_length = mp;
		}

		pn = (struct tnode *)n; /* Descend */
		chopped_off = 0;
		continue;
backtrace:
		chopped_off++;

		/* As zero don't change the child key (cindex) */
		while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
			chopped_off++;

		/* Decrease current_... with bits chopped off */
		if (current_prefix_length > pn->pos + pn->bits - chopped_off)
			current_prefix_length = pn->pos + pn->bits - chopped_off;

		/*
		 * Either we do the actual chop off according to that, or if we
		 * have chopped off all bits in this tnode we walk up to our parent.
		 */

		if (chopped_off <= pn->bits) {
			cindex &= ~(1 << (chopped_off-1));
		} else {
			struct tnode *parent = node_parent((struct node *) pn);
			if (!parent)
				goto failed;

			/* Get Child's index */
			cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
			pn = parent;
			chopped_off = 0;

#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.backtrack++;
#endif
			goto backtrace;
		}
	}
failed:
	ret = 1;
found:
	rcu_read_unlock();
	return ret;
}
/* only called from updater side */
static int trie_leaf_remove(struct trie *t, t_key key)
{
	t_key cindex;
	struct tnode *tp = NULL;
	struct node *n = t->trie;
	struct leaf *l;

	pr_debug("entering trie_leaf_remove(%p)\n", n);

	/* Note that in the case skipped bits, those bits are *not* checked!
	 * When we finish this, we will have NULL or a T_LEAF, and the
	 * T_LEAF may or may not match our key.
	 */

	while (n != NULL && IS_TNODE(n)) {
		struct tnode *tn = (struct tnode *) n;
		check_tnode(tn);
		n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));

		BUG_ON(n && node_parent(n) != tn);
	}
	l = (struct leaf *) n;

	if (!n || !tkey_equals(l->key, key))
		return 0;

	/*
	 * Key found.
	 * Remove the leaf and rebalance the tree
	 */

	t->size--;

	tp = node_parent(n);
	tnode_free((struct tnode *) n);

	if (tp) {
		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		put_child(t, (struct tnode *)tp, cindex, NULL);
		rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
	} else
		rcu_assign_pointer(t->trie, NULL);

	return 1;
}
/*
 * Caller must hold RTNL.
 */
static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
{
	struct trie *t = (struct trie *) tb->tb_data;
	u32 key, mask;
	int plen = cfg->fc_dst_len;
	u8 tos = cfg->fc_tos;
	struct fib_alias *fa, *fa_to_delete;
	struct list_head *fa_head;
	struct leaf *l;
	struct leaf_info *li;

	if (plen > 32)
		return -EINVAL;

	key = ntohl(cfg->fc_dst);
	mask = ntohl(inet_make_mask(plen));

	if (key & ~mask)
		return -EINVAL;

	key = key & mask;
	l = fib_find_node(t, key);

	if (!l)
		return -ESRCH;

	fa_head = get_fa_head(l, plen);
	fa = fib_find_alias(fa_head, tos, 0);

	if (!fa)
		return -ESRCH;

	pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);

	fa_to_delete = NULL;
	fa_head = fa->fa_list.prev;
	list_for_each_entry(fa, fa_head, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fa->fa_tos != tos)
			break;

		if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
		     fa->fa_scope == cfg->fc_scope) &&
		    (!cfg->fc_protocol ||
		     fi->fib_protocol == cfg->fc_protocol) &&
		    fib_nh_match(cfg, fi) == 0) {
			fa_to_delete = fa;
			break;
		}
	}

	if (!fa_to_delete)
		return -ESRCH;

	fa = fa_to_delete;
	rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
		  &cfg->fc_nlinfo, 0);

	l = fib_find_node(t, key);
	li = find_leaf_info(l, plen);

	list_del_rcu(&fa->fa_list);

	if (list_empty(fa_head)) {
		hlist_del_rcu(&li->hlist);
		free_leaf_info(li);
	}

	if (hlist_empty(&l->list))
		trie_leaf_remove(t, key);

	if (fa->fa_state & FA_S_ACCESSED)
		rt_cache_flush(-1);

	fib_release_info(fa->fa_info);
	alias_free_mem_rcu(fa);
	return 0;
}
static int trie_flush_list(struct trie *t, struct list_head *head)
{
	struct fib_alias *fa, *fa_node;
	int found = 0;

	list_for_each_entry_safe(fa, fa_node, head, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
			list_del_rcu(&fa->fa_list);
			fib_release_info(fa->fa_info);
			alias_free_mem_rcu(fa);
			found++;
		}
	}
	return found;
}
static int trie_flush_leaf(struct trie *t, struct leaf *l)
{
	int found = 0;
	struct hlist_head *lih = &l->list;
	struct hlist_node *node, *tmp;
	struct leaf_info *li = NULL;

	hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
		found += trie_flush_list(t, &li->falh);

		if (list_empty(&li->falh)) {
			hlist_del_rcu(&li->hlist);
			free_leaf_info(li);
		}
	}
	return found;
}
/* rcu_read_lock needs to be held by caller from readside */

static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
{
	struct node *c = (struct node *) thisleaf;
	struct tnode *p;
	int idx;
	struct node *trie = rcu_dereference(t->trie);

	if (c == NULL) {
		if (trie == NULL)
			return NULL;

		if (IS_LEAF(trie))          /* trie w. just a leaf */
			return (struct leaf *) trie;

		p = (struct tnode *) trie;  /* Start */
	} else
		p = node_parent_rcu(c);

	while (p) {
		int pos, last;

		/* Find the next child of the parent */
		if (c)
			pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
		else
			pos = 0;

		last = 1 << p->bits;
		for (idx = pos; idx < last; idx++) {
			c = rcu_dereference(p->child[idx]);

			if (!c)
				continue;

			/* Descend if tnode */
			while (IS_TNODE(c)) {
				p = (struct tnode *) c;
				idx = 0;

				/* Rightmost non-NULL branch */
				if (p && IS_TNODE(p))
					while (!(c = rcu_dereference(p->child[idx]))
					       && idx < (1<<p->bits)) idx++;

				/* Done with this tnode? */
				if (idx >= (1 << p->bits) || !c)
					goto up;
			}
			return (struct leaf *) c;
		}
up:
		/* No more children go up one step  */
		c = (struct node *) p;
		p = node_parent_rcu(c);
	}
	return NULL; /* Ready. Root of trie */
}
/*
 * Caller must hold RTNL.
 */
static int fn_trie_flush(struct fib_table *tb)
{
	struct trie *t = (struct trie *) tb->tb_data;
	struct leaf *ll = NULL, *l = NULL;
	int found = 0, h;

	for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
		found += trie_flush_leaf(t, l);

		if (ll && hlist_empty(&ll->list))
			trie_leaf_remove(t, ll->key);
		ll = l;
	}

	if (ll && hlist_empty(&ll->list))
		trie_leaf_remove(t, ll->key);

	pr_debug("trie_flush found=%d\n", found);
	return found;
}
static void
fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
{
	struct trie *t = (struct trie *) tb->tb_data;
	int order, last_idx;
	struct fib_info *fi = NULL;
	struct fib_info *last_resort;
	struct fib_alias *fa = NULL;
	struct list_head *fa_head;
	struct leaf *l;

	last_idx = -1;
	last_resort = NULL;
	order = -1;

	rcu_read_lock();

	l = fib_find_node(t, 0);
	if (!l)
		goto out;

	fa_head = get_fa_head(l, 0);
	if (!fa_head)
		goto out;

	if (list_empty(fa_head))
		goto out;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;
		fa->fa_state |= FA_S_ACCESSED;

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, tb->tb_default)) {
			fib_result_assign(res, fi);
			tb->tb_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}
	if (order <= 0 || fi == NULL) {
		tb->tb_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      tb->tb_default)) {
		fib_result_assign(res, fi);
		tb->tb_default = order;
		goto out;
	}
	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	tb->tb_default = last_idx;
out:
	rcu_read_unlock();
}
static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
			   struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, s_i;
	struct fib_alias *fa;

	__be32 xkey = htonl(key);

	s_i = cb->args[4];
	i = 0;

	/* rcu_read_lock is held by caller */

	list_for_each_entry_rcu(fa, fah, fa_list) {
		if (i < s_i) {
			i++;
			continue;
		}
		BUG_ON(!fa->fa_info);

		if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq,
				  RTM_NEWROUTE,
				  tb->tb_id,
				  fa->fa_type,
				  fa->fa_scope,
				  xkey,
				  plen,
				  fa->fa_tos,
				  fa->fa_info, 0) < 0) {
			cb->args[4] = i;
			return -1;
		}
		i++;
	}
	cb->args[4] = i;
	return skb->len;
}
static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	int h, s_h;
	struct list_head *fa_head;
	struct leaf *l = NULL;

	s_h = cb->args[3];

	for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			memset(&cb->args[4], 0,
			       sizeof(cb->args) - 4*sizeof(cb->args[0]));

		fa_head = get_fa_head(l, plen);

		if (!fa_head)
			continue;

		if (list_empty(fa_head))
			continue;

		if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
			cb->args[3] = h;
			return -1;
		}
	}
	cb->args[3] = h;
	return skb->len;
}
static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
{
	int m, s_m;
	struct trie *t = (struct trie *) tb->tb_data;

	s_m = cb->args[2];

	rcu_read_lock();
	for (m = 0; m <= 32; m++) {
		if (m < s_m)
			continue;
		if (m > s_m)
			memset(&cb->args[3], 0,
			       sizeof(cb->args) - 3*sizeof(cb->args[0]));

		if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
			cb->args[2] = m;
			goto out;
		}
	}
	rcu_read_unlock();
	cb->args[2] = m;
	return skb->len;
out:
	rcu_read_unlock();
	return -1;
}
void __init fib_hash_init(void)
{
	fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
					  0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
/* Fix more generic FIB names for init later */
struct fib_table *fib_hash_table(u32 id)
{
	struct fib_table *tb;
	struct trie *t;

	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
		     GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_default = -1;
	tb->tb_lookup = fn_trie_lookup;
	tb->tb_insert = fn_trie_insert;
	tb->tb_delete = fn_trie_delete;
	tb->tb_flush = fn_trie_flush;
	tb->tb_select_default = fn_trie_select_default;
	tb->tb_dump = fn_trie_dump;

	t = (struct trie *) tb->tb_data;
	memset(t, 0, sizeof(*t));

	if (id == RT_TABLE_LOCAL)
		printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);

	return tb;
}
#ifdef CONFIG_PROC_FS
/* Depth first Trie walk iterator */
struct fib_trie_iter {
	struct seq_net_private p;
	struct trie *trie_local, *trie_main;
	struct tnode *tnode;
	struct trie *trie;
	unsigned index;
	unsigned depth;
};
static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
{
	struct tnode *tn = iter->tnode;
	unsigned cindex = iter->index;
	struct tnode *p;

	/* A single entry routing table */
	if (!tn)
		return NULL;

	pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
		 iter->tnode, iter->index, iter->depth);
rescan:
	while (cindex < (1<<tn->bits)) {
		struct node *n = tnode_get_child_rcu(tn, cindex);

		if (n) {
			if (IS_LEAF(n)) {
				iter->tnode = tn;
				iter->index = cindex + 1;
			} else {
				/* push down one level */
				iter->tnode = (struct tnode *) n;
				iter->index = 0;
				++iter->depth;
			}
			return n;
		}

		++cindex;
	}

	/* Current node exhausted, pop back up */
	p = node_parent_rcu((struct node *)tn);
	if (p) {
		cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
		tn = p;
		--iter->depth;
		goto rescan;
	}

	/* got root? */
	return NULL;
}
static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
				       struct trie *t)
{
	struct node *n;

	if (!iter)
		return NULL;

	n = rcu_dereference(t->trie);
	if (n && IS_TNODE(n)) {
		iter->tnode = (struct tnode *) n;
		iter->trie = t;
		iter->index = 0;
		iter->depth = 1;
	}
	return n;
}
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
	struct node *n;
	struct fib_trie_iter iter;

	memset(s, 0, sizeof(*s));

	rcu_read_lock();
	for (n = fib_trie_get_first(&iter, t); n;
	     n = fib_trie_get_next(&iter)) {
		if (IS_LEAF(n)) {
			s->leaves++;
			s->totdepth += iter.depth;
			if (iter.depth > s->maxdepth)
				s->maxdepth = iter.depth;
		} else {
			const struct tnode *tn = (const struct tnode *) n;
			int i;

			s->tnodes++;
			if (tn->bits < MAX_STAT_DEPTH)
				s->nodesizes[tn->bits]++;

			for (i = 0; i < (1<<tn->bits); i++)
				if (!tn->child[i])
					s->nullpointers++;
		}
	}
	rcu_read_unlock();
}
/*
 *	This outputs /proc/net/fib_triestats
 */
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
	unsigned i, max, pointers, bytes, avdepth;

	if (stat->leaves)
		avdepth = stat->totdepth*100 / stat->leaves;
	else
		avdepth = 0;

	seq_printf(seq, "\tAver depth:     %u.%02d\n", avdepth / 100, avdepth % 100);
	seq_printf(seq, "\tMax depth:      %u\n", stat->maxdepth);

	seq_printf(seq, "\tLeaves:         %u\n", stat->leaves);

	bytes = sizeof(struct leaf) * stat->leaves;
	seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
	bytes += sizeof(struct tnode) * stat->tnodes;

	max = MAX_STAT_DEPTH;
	while (max > 0 && stat->nodesizes[max-1] == 0)
		max--;

	pointers = 0;
	for (i = 1; i <= max; i++)
		if (stat->nodesizes[i] != 0) {
			seq_printf(seq, "  %u: %u",  i, stat->nodesizes[i]);
			pointers += (1<<i) * stat->nodesizes[i];
		}
	seq_putc(seq, '\n');
	seq_printf(seq, "\tPointers: %u\n", pointers);

	bytes += sizeof(struct node *) * pointers;
	seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
	seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
}
#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
			    const struct trie_use_stats *stats)
{
	seq_printf(seq, "\nCounters:\n---------\n");
	seq_printf(seq, "gets = %u\n", stats->gets);
	seq_printf(seq, "backtracks = %u\n", stats->backtrack);
	seq_printf(seq, "semantic match passed = %u\n", stats->semantic_match_passed);
	seq_printf(seq, "semantic match miss = %u\n", stats->semantic_match_miss);
	seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
	seq_printf(seq, "skipped node resize = %u\n\n", stats->resize_node_skipped);
}
#endif /*  CONFIG_IP_FIB_TRIE_STATS */
static void fib_trie_show(struct seq_file *seq, const char *name, struct trie *trie)
{
	struct trie_stat stat;

	seq_printf(seq, "%s: %d\n", name, trie->size);
	trie_collect_stats(trie, &stat);
	trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
	trie_show_usage(seq, &trie->stats);
#endif
}
static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	struct fib_table *tb;

	seq_printf(seq,
		   "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
		   sizeof(struct leaf), sizeof(struct tnode));

	tb = fib_get_table(net, RT_TABLE_LOCAL);
	if (tb)
		fib_trie_show(seq, "Local", (struct trie *) tb->tb_data);

	tb = fib_get_table(net, RT_TABLE_MAIN);
	if (tb)
		fib_trie_show(seq, "Main", (struct trie *) tb->tb_data);

	return 0;
}
static int fib_triestat_seq_open(struct inode *inode, struct file *file)
{
	int err;
	struct net *net;

	net = get_proc_net(inode);
	if (net == NULL)
		return -ENXIO;
	err = single_open(file, fib_triestat_seq_show, net);
	if (err < 0) {
		put_net(net);
		return err;
	}
	return 0;
}

static int fib_triestat_seq_release(struct inode *ino, struct file *f)
{
	struct seq_file *seq = f->private_data;

	put_net(seq->private);
	return single_release(ino, f);
}
static const struct file_operations fib_triestat_fops = {
	.owner	= THIS_MODULE,
	.open	= fib_triestat_seq_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = fib_triestat_seq_release,
};
static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
				     loff_t pos)
{
	loff_t idx = 0;
	struct node *n;

	for (n = fib_trie_get_first(iter, iter->trie_local);
	     n; ++idx, n = fib_trie_get_next(iter)) {
		if (pos == idx)
			return n;
	}

	for (n = fib_trie_get_first(iter, iter->trie_main);
	     n; ++idx, n = fib_trie_get_next(iter)) {
		if (pos == idx)
			return n;
	}
	return NULL;
}
static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct fib_table *tb;

	if (!iter->trie_local) {
		tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
		if (tb)
			iter->trie_local = (struct trie *) tb->tb_data;
	}
	if (!iter->trie_main) {
		tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
		if (tb)
			iter->trie_main = (struct trie *) tb->tb_data;
	}
	rcu_read_lock();
	if (*pos == 0)
		return SEQ_START_TOKEN;
	return fib_trie_get_idx(iter, *pos - 1);
}
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	void *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return fib_trie_get_idx(iter, 0);

	v = fib_trie_get_next(iter);
	BUG_ON(v == l);
	if (v)
		return v;

	/* continue scan in next trie */
	if (iter->trie == iter->trie_local)
		return fib_trie_get_first(iter, iter->trie_main);

	return NULL;
}

static void fib_trie_seq_stop(struct seq_file *seq, void *v)
{
	rcu_read_unlock();
}
static void seq_indent(struct seq_file *seq, int n)
{
	while (n-- > 0) seq_puts(seq, " ");
}
static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE:	return "site";
	case RT_SCOPE_LINK:	return "link";
	case RT_SCOPE_HOST:	return "host";
	case RT_SCOPE_NOWHERE:	return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}
static const char *rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};
, size_t len
, unsigned t
)
2312 if (t
< __RTN_MAX
&& rtn_type_names
[t
])
2313 return rtn_type_names
[t
];
2314 snprintf(buf
, len
, "type %u", t
);
/* Pretty print the trie */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct node *n = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	if (!node_parent_rcu(n)) {
		if (iter->trie == iter->trie_local)
			seq_puts(seq, "<local>:\n");
		else
			seq_puts(seq, "<main>:\n");
	}

	if (IS_TNODE(n)) {
		struct tnode *tn = (struct tnode *) n;
		__be32 prf = htonl(mask_pfx(tn->key, tn->pos));

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, "  +-- %d.%d.%d.%d/%d %d %d %d\n",
			   NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
			   tn->empty_children);

	} else {
		struct leaf *l = (struct leaf *) n;
		int i;
		__be32 val = htonl(l->key);

		seq_indent(seq, iter->depth);
		seq_printf(seq, "  |-- %d.%d.%d.%d\n", NIPQUAD(val));
		for (i = 32; i >= 0; i--) {
			struct leaf_info *li = find_leaf_info(l, i);

			if (li) {
				struct fib_alias *fa;

				list_for_each_entry_rcu(fa, &li->falh, fa_list) {
					char buf1[32], buf2[32];

					seq_indent(seq, iter->depth+1);
					seq_printf(seq, "  /%d %s %s", i,
						   rtn_scope(buf1, sizeof(buf1),
							     fa->fa_scope),
						   rtn_type(buf2, sizeof(buf2),
							    fa->fa_type));
					if (fa->fa_tos)
						seq_printf(seq, "tos =%d\n",
							   fa->fa_tos);
					seq_putc(seq, '\n');
				}
			}
		}
	}

	return 0;
}
static const struct seq_operations fib_trie_seq_ops = {
	.start  = fib_trie_seq_start,
	.next   = fib_trie_seq_next,
	.stop   = fib_trie_seq_stop,
	.show   = fib_trie_seq_show,
};

static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_trie_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_trie_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	static unsigned type2flags[RTN_MAX + 1] = {
		[7] = RTF_REJECT, [8] = RTF_REJECT,
	};
	unsigned flags = type2flags[type];

	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}
/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to be changed
 *	and needs to be same as fib_hash output to avoid breaking
 *	legacy utilities.
 */
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct leaf *l = v;
	int i;
	char bf[128];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	if (iter->trie == iter->trie_local)
		return 0;
	if (IS_TNODE(l))
		return 0;

	for (i=32; i>=0; i--) {
		struct leaf_info *li = find_leaf_info(l, i);
		struct fib_alias *fa;
		__be32 mask, prefix;

		if (!li)
			continue;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);

			if (fa->fa_type == RTN_BROADCAST
			    || fa->fa_type == RTN_MULTICAST)
				continue;

			if (fi)
				snprintf(bf, sizeof(bf),
					 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
					 fi->fib_dev ? fi->fib_dev->name : "*",
					 prefix,
					 fi->fib_nh->nh_gw, flags, 0, 0,
					 fi->fib_priority,
					 mask,
					 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
					 fi->fib_window,
					 fi->fib_rtt >> 3);
			else
				snprintf(bf, sizeof(bf),
					 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
					 prefix, 0, flags, 0, 0, 0,
					 mask, 0, 0, 0);

			seq_printf(seq, "%-127s\n", bf);
		}
	}

	return 0;
}
static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_trie_seq_start,
	.next   = fib_trie_seq_next,
	.stop   = fib_trie_seq_stop,
	.show   = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_route_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_route_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
int __net_init fib_proc_init(struct net *net)
{
	if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
		goto out1;

	if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
				  &fib_triestat_fops))
		goto out2;

	if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
		goto out3;

	return 0;

out3:
	proc_net_remove(net, "fib_triestat");
out2:
	proc_net_remove(net, "fib_trie");
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	proc_net_remove(net, "fib_trie");
	proc_net_remove(net, "fib_triestat");
	proc_net_remove(net, "route");
}

#endif /* CONFIG_PROC_FS */