net/ipv4/fib_trie.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work comes from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.408"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
77 #include <net/ip.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
80 #include <net/tcp.h>
81 #include <net/sock.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
85 #define MAX_STAT_DEPTH 32
87 #define KEYLENGTH (8*sizeof(t_key))
89 typedef unsigned int t_key;
91 #define T_TNODE 0
92 #define T_LEAF 1
93 #define NODE_TYPE_MASK 0x1UL
94 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96 #define IS_TNODE(n) (!(n->parent & T_LEAF))
97 #define IS_LEAF(n) (n->parent & T_LEAF)
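/*
 * Illustrative user-space sketch (not part of this kernel file; the EX_
 * names stand in for T_LEAF and NODE_TYPE_MASK above): the low bit of
 * ->parent doubles as the T_TNODE/T_LEAF tag, which works because parent
 * structures are at least 2-byte aligned, so bit 0 of a real pointer is
 * always free.
 */
#include <assert.h>
#include <stdio.h>

#define EX_T_LEAF		1UL
#define EX_NODE_TYPE_MASK	0x1UL

struct ex_node { unsigned long parent; };

int main(void)
{
	struct ex_node parent_tnode = { 0 };
	struct ex_node leaf = { 0 };

	/* Store the parent pointer and tag this node as a leaf. */
	leaf.parent = (unsigned long)&parent_tnode | EX_T_LEAF;

	/* Both the tag and the real pointer can be recovered. */
	assert((leaf.parent & EX_NODE_TYPE_MASK) == EX_T_LEAF);
	assert((struct ex_node *)(leaf.parent & ~EX_NODE_TYPE_MASK) ==
	       &parent_tnode);

	printf("type tag = %lu, parent = %p\n",
	       leaf.parent & EX_NODE_TYPE_MASK,
	       (void *)(leaf.parent & ~EX_NODE_TYPE_MASK));
	return 0;
}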
99 struct node {
100 unsigned long parent;
101 t_key key;
104 struct leaf {
105 unsigned long parent;
106 t_key key;
107 struct hlist_head list;
108 struct rcu_head rcu;
111 struct leaf_info {
112 struct hlist_node hlist;
113 struct rcu_head rcu;
114 int plen;
115 struct list_head falh;
118 struct tnode {
119 unsigned long parent;
120 t_key key;
121 unsigned char pos; /* 2log(KEYLENGTH) bits needed */
122 unsigned char bits; /* 2log(KEYLENGTH) bits needed */
123 unsigned int full_children; /* KEYLENGTH bits needed */
124 unsigned int empty_children; /* KEYLENGTH bits needed */
125 struct rcu_head rcu;
126 struct node *child[0];
129 #ifdef CONFIG_IP_FIB_TRIE_STATS
130 struct trie_use_stats {
131 unsigned int gets;
132 unsigned int backtrack;
133 unsigned int semantic_match_passed;
134 unsigned int semantic_match_miss;
135 unsigned int null_node_hit;
136 unsigned int resize_node_skipped;
138 #endif
140 struct trie_stat {
141 unsigned int totdepth;
142 unsigned int maxdepth;
143 unsigned int tnodes;
144 unsigned int leaves;
145 unsigned int nullpointers;
146 unsigned int nodesizes[MAX_STAT_DEPTH];
149 struct trie {
150 struct node *trie;
151 unsigned int size;
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
154 #endif
157 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
158 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
159 static struct node *resize(struct trie *t, struct tnode *tn);
160 static struct tnode *inflate(struct trie *t, struct tnode *tn);
161 static struct tnode *halve(struct trie *t, struct tnode *tn);
162 static void tnode_free(struct tnode *tn);
164 static struct kmem_cache *fn_alias_kmem __read_mostly;
166 static inline struct tnode *node_parent(struct node *node)
168 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
171 static inline struct tnode *node_parent_rcu(struct node *node)
173 struct tnode *ret = node_parent(node);
175 return rcu_dereference(ret);
178 static inline void node_set_parent(struct node *node, struct tnode *ptr)
180 rcu_assign_pointer(node->parent,
181 (unsigned long)ptr | NODE_TYPE(node));
184 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
186 BUG_ON(i >= 1U << tn->bits);
188 return tn->child[i];
191 static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
193 struct node *ret = tnode_get_child(tn, i);
195 return rcu_dereference(ret);
198 static inline int tnode_child_length(const struct tnode *tn)
200 return 1 << tn->bits;
203 static inline t_key mask_pfx(t_key k, unsigned short l)
205 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
208 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
210 if (offset < KEYLENGTH)
211 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
212 else
213 return 0;
216 static inline int tkey_equals(t_key a, t_key b)
218 return a == b;
221 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
223 if (bits == 0 || offset >= KEYLENGTH)
224 return 1;
225 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
226 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
229 static inline int tkey_mismatch(t_key a, int offset, t_key b)
231 t_key diff = a ^ b;
232 int i = offset;
234 if (!diff)
235 return 0;
236 while ((diff << i) >> (KEYLENGTH-1) == 0)
237 i++;
238 return i;
242 To understand this stuff, an understanding of keys and all their bits is
243 necessary. Every node in the trie has a key associated with it, but not
244 all of the bits in that key are significant.
246 Consider a node 'n' and its parent 'tp'.
248 If n is a leaf, every bit in its key is significant. Its presence is
249 necessitated by path compression, since during a tree traversal (when
250 searching for a leaf - unless we are doing an insertion) we will completely
251 ignore all skipped bits we encounter. Thus we need to verify, at the end of
252 a potentially successful search, that we have indeed been walking the
253 correct key path.
255 Note that we can never "miss" the correct key in the tree if present by
256 following the wrong path. Path compression ensures that segments of the key
257 that are the same for all keys with a given prefix are skipped, but the
258 skipped part *is* identical for each node in the subtrie below the skipped
259 bit! trie_insert() in this implementation takes care of that - note the
260 call to tkey_sub_equals() in trie_insert().
262 If n is an internal node - a 'tnode' here - the various parts of its key
263 have many different meanings.
265 Example:
266 _________________________________________________________________
267 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
268 -----------------------------------------------------------------
269 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
271 _________________________________________________________________
272 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
273 -----------------------------------------------------------------
274 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
276 tp->pos = 7
277 tp->bits = 3
278 n->pos = 15
279 n->bits = 4
281 First, let's just ignore the bits that come before the parent tp, that is
282 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
283 not use them for anything.
285 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
286 index into the parent's child array. That is, they will be used to find
287 'n' among tp's children.
289 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
290 for the node n.
292 All the bits we have seen so far are significant to the node n. The rest
293 of the bits are really not needed or indeed known in n->key.
295 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
296 n's child array, and will of course be different for each child.
299 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
300 at this point.
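/*
 * Illustrative user-space sketch of the example above (not part of this
 * kernel file): extracting the "N" bits (index into tp->child[]) and the
 * "C" bits (index into n->child[]) from a key, using the same shift trick
 * as tkey_extract_bits() earlier in this file.  The key value and the ex_
 * names are arbitrary.
 */
#include <stdio.h>

typedef unsigned int ex_key;
#define EX_KEYLENGTH (8 * sizeof(ex_key))

static ex_key ex_extract_bits(ex_key a, int offset, int bits)
{
	if (offset < (int)EX_KEYLENGTH)
		return ((ex_key)(a << offset)) >> (EX_KEYLENGTH - bits);
	return 0;
}

int main(void)
{
	ex_key key = 0x01234567;	/* arbitrary example key */
	int tp_pos = 7, tp_bits = 3;	/* the "N" bits in the figure */
	int n_pos = 15, n_bits = 4;	/* the "C" bits in the figure */

	printf("index into tp->child[]: %u\n",
	       ex_extract_bits(key, tp_pos, tp_bits));
	printf("index into n->child[]:  %u\n",
	       ex_extract_bits(key, n_pos, n_bits));
	return 0;
}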
304 static inline void check_tnode(const struct tnode *tn)
306 WARN_ON(tn && tn->pos+tn->bits > 32);
309 static const int halve_threshold = 25;
310 static const int inflate_threshold = 50;
311 static const int halve_threshold_root = 8;
312 static const int inflate_threshold_root = 15;
315 static void __alias_free_mem(struct rcu_head *head)
317 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
318 kmem_cache_free(fn_alias_kmem, fa);
321 static inline void alias_free_mem_rcu(struct fib_alias *fa)
323 call_rcu(&fa->rcu, __alias_free_mem);
326 static void __leaf_free_rcu(struct rcu_head *head)
328 kfree(container_of(head, struct leaf, rcu));
331 static void __leaf_info_free_rcu(struct rcu_head *head)
333 kfree(container_of(head, struct leaf_info, rcu));
336 static inline void free_leaf_info(struct leaf_info *leaf)
338 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
341 static struct tnode *tnode_alloc(size_t size)
343 struct page *pages;
345 if (size <= PAGE_SIZE)
346 return kzalloc(size, GFP_KERNEL);
348 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
349 if (!pages)
350 return NULL;
352 return page_address(pages);
355 static void __tnode_free_rcu(struct rcu_head *head)
357 struct tnode *tn = container_of(head, struct tnode, rcu);
358 size_t size = sizeof(struct tnode) +
359 (sizeof(struct node *) << tn->bits);
361 if (size <= PAGE_SIZE)
362 kfree(tn);
363 else
364 free_pages((unsigned long)tn, get_order(size));
367 static inline void tnode_free(struct tnode *tn)
369 if (IS_LEAF(tn)) {
370 struct leaf *l = (struct leaf *) tn;
371 call_rcu_bh(&l->rcu, __leaf_free_rcu);
372 } else
373 call_rcu(&tn->rcu, __tnode_free_rcu);
376 static struct leaf *leaf_new(void)
378 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
379 if (l) {
380 l->parent = T_LEAF;
381 INIT_HLIST_HEAD(&l->list);
383 return l;
386 static struct leaf_info *leaf_info_new(int plen)
388 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
389 if (li) {
390 li->plen = plen;
391 INIT_LIST_HEAD(&li->falh);
393 return li;
396 static struct tnode* tnode_new(t_key key, int pos, int bits)
398 size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
399 struct tnode *tn = tnode_alloc(sz);
401 if (tn) {
402 tn->parent = T_TNODE;
403 tn->pos = pos;
404 tn->bits = bits;
405 tn->key = key;
406 tn->full_children = 0;
407 tn->empty_children = 1<<bits;
410 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
411 (unsigned long) (sizeof(struct node) << bits));
412 return tn;
416 * Check whether a tnode 'n' is "full", i.e. it is an internal node
417 * and no bits are skipped. See discussion in dyntree paper p. 6
420 static inline int tnode_full(const struct tnode *tn, const struct node *n)
422 if (n == NULL || IS_LEAF(n))
423 return 0;
425 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
428 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
430 tnode_put_child_reorg(tn, i, n, -1);
434 * Add a child at position i overwriting the old value.
435 * Update the value of full_children and empty_children.
438 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
440 struct node *chi = tn->child[i];
441 int isfull;
443 BUG_ON(i >= 1<<tn->bits);
446 /* update emptyChildren */
447 if (n == NULL && chi != NULL)
448 tn->empty_children++;
449 else if (n != NULL && chi == NULL)
450 tn->empty_children--;
452 /* update fullChildren */
453 if (wasfull == -1)
454 wasfull = tnode_full(tn, chi);
456 isfull = tnode_full(tn, n);
457 if (wasfull && !isfull)
458 tn->full_children--;
459 else if (!wasfull && isfull)
460 tn->full_children++;
462 if (n)
463 node_set_parent(n, tn);
465 rcu_assign_pointer(tn->child[i], n);
468 static struct node *resize(struct trie *t, struct tnode *tn)
470 int i;
471 int err = 0;
472 struct tnode *old_tn;
473 int inflate_threshold_use;
474 int halve_threshold_use;
475 int max_resize;
477 if (!tn)
478 return NULL;
480 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
481 tn, inflate_threshold, halve_threshold);
483 /* No children */
484 if (tn->empty_children == tnode_child_length(tn)) {
485 tnode_free(tn);
486 return NULL;
488 /* One child */
489 if (tn->empty_children == tnode_child_length(tn) - 1)
490 for (i = 0; i < tnode_child_length(tn); i++) {
491 struct node *n;
493 n = tn->child[i];
494 if (!n)
495 continue;
497 /* compress one level */
498 node_set_parent(n, NULL);
499 tnode_free(tn);
500 return n;
503 * Double as long as the resulting node has a number of
504 * nonempty nodes that are above the threshold.
508 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
509 * the Helsinki University of Technology and Matti Tikkanen of Nokia
510 * Telecommunications, page 6:
511 * "A node is doubled if the ratio of non-empty children to all
512 * children in the *doubled* node is at least 'high'."
514 * 'high' in this instance is the variable 'inflate_threshold'. It
515 * is expressed as a percentage, so we multiply it with
516 * tnode_child_length() and instead of multiplying by 2 (since the
517 * child array will be doubled by inflate()) and multiplying
518 * the left-hand side by 100 (to handle the percentage thing) we
519 * multiply the left-hand side by 50.
521 * The left-hand side may look a bit weird: tnode_child_length(tn)
522 * - tn->empty_children is of course the number of non-null children
523 * in the current node. tn->full_children is the number of "full"
524 * children, that is non-null tnodes with a skip value of 0.
525 * All of those will be doubled in the resulting inflated tnode, so
526 * we just count them one extra time here.
528 * A clearer way to write this would be:
530 * to_be_doubled = tn->full_children;
531 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
532 * tn->full_children;
534 * new_child_length = tnode_child_length(tn) * 2;
536 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
537 * new_child_length;
538 * if (new_fill_factor >= inflate_threshold)
540 * ...and so on, though it would mess up the while () loop.
542 * anyway,
543 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
544 * inflate_threshold
546 * avoid a division:
547 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
548 * inflate_threshold * new_child_length
550 * expand not_to_be_doubled and to_be_doubled, and shorten:
551 * 100 * (tnode_child_length(tn) - tn->empty_children +
552 * tn->full_children) >= inflate_threshold * new_child_length
554 * expand new_child_length:
555 * 100 * (tnode_child_length(tn) - tn->empty_children +
556 * tn->full_children) >=
557 * inflate_threshold * tnode_child_length(tn) * 2
559 * shorten again:
560 * 50 * (tn->full_children + tnode_child_length(tn) -
561 * tn->empty_children) >= inflate_threshold *
562 * tnode_child_length(tn)
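	/*
	 * Worked example (illustrative numbers, not from the source): take a
	 * tnode with bits = 4, so tnode_child_length(tn) = 16, and suppose
	 * empty_children = 4 and full_children = 6.  Then
	 *
	 *	not_to_be_doubled = 16 - 4 - 6 = 6
	 *	to_be_doubled     = 6
	 *	new_child_length  = 32
	 *	new_fill_factor   = 100 * (6 + 2*6) / 32 = 56 (percent)
	 *
	 * which is above the default inflate_threshold of 50, so the node is
	 * doubled.  The division-free form used in the while () condition
	 * below checks the equivalent inequality
	 *	50 * (6 + 16 - 4) = 900 >= 50 * 16 = 800.
	 */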
566 check_tnode(tn);
568 /* Keep root node larger */
570 if (!tn->parent)
571 inflate_threshold_use = inflate_threshold_root;
572 else
573 inflate_threshold_use = inflate_threshold;
575 err = 0;
576 max_resize = 10;
577 while ((tn->full_children > 0 && max_resize-- &&
578 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
579 inflate_threshold_use * tnode_child_length(tn))) {
581 old_tn = tn;
582 tn = inflate(t, tn);
583 if (IS_ERR(tn)) {
584 tn = old_tn;
585 #ifdef CONFIG_IP_FIB_TRIE_STATS
586 t->stats.resize_node_skipped++;
587 #endif
588 break;
592 if (max_resize < 0) {
593 if (!tn->parent)
594 printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
595 inflate_threshold_root, tn->bits);
596 else
597 printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
598 inflate_threshold, tn->bits);
601 check_tnode(tn);
604 * Halve as long as the number of empty children in this
605 * node is above threshold.
609 /* Keep root node larger */
611 if (!tn->parent)
612 halve_threshold_use = halve_threshold_root;
613 else
614 halve_threshold_use = halve_threshold;
616 err = 0;
617 max_resize = 10;
618 while (tn->bits > 1 && max_resize-- &&
619 100 * (tnode_child_length(tn) - tn->empty_children) <
620 halve_threshold_use * tnode_child_length(tn)) {
622 old_tn = tn;
623 tn = halve(t, tn);
624 if (IS_ERR(tn)) {
625 tn = old_tn;
626 #ifdef CONFIG_IP_FIB_TRIE_STATS
627 t->stats.resize_node_skipped++;
628 #endif
629 break;
633 if (max_resize < 0) {
634 if (!tn->parent)
635 printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
636 halve_threshold_root, tn->bits);
637 else
638 printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
639 halve_threshold, tn->bits);
642 /* Only one child remains */
643 if (tn->empty_children == tnode_child_length(tn) - 1)
644 for (i = 0; i < tnode_child_length(tn); i++) {
645 struct node *n;
647 n = tn->child[i];
648 if (!n)
649 continue;
651 /* compress one level */
653 node_set_parent(n, NULL);
654 tnode_free(tn);
655 return n;
658 return (struct node *) tn;
661 static struct tnode *inflate(struct trie *t, struct tnode *tn)
663 struct tnode *oldtnode = tn;
664 int olen = tnode_child_length(tn);
665 int i;
667 pr_debug("In inflate\n");
669 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
671 if (!tn)
672 return ERR_PTR(-ENOMEM);
675 * Preallocate and store tnodes before the actual work so we
676 * don't get into an inconsistent state if memory allocation
677 * fails. In case of failure we return the old node and the inflate
678 * of the tnode is abandoned.
681 for (i = 0; i < olen; i++) {
682 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
684 if (inode &&
685 IS_TNODE(inode) &&
686 inode->pos == oldtnode->pos + oldtnode->bits &&
687 inode->bits > 1) {
688 struct tnode *left, *right;
689 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
691 left = tnode_new(inode->key&(~m), inode->pos + 1,
692 inode->bits - 1);
693 if (!left)
694 goto nomem;
696 right = tnode_new(inode->key|m, inode->pos + 1,
697 inode->bits - 1);
699 if (!right) {
700 tnode_free(left);
701 goto nomem;
704 put_child(t, tn, 2*i, (struct node *) left);
705 put_child(t, tn, 2*i+1, (struct node *) right);
709 for (i = 0; i < olen; i++) {
710 struct tnode *inode;
711 struct node *node = tnode_get_child(oldtnode, i);
712 struct tnode *left, *right;
713 int size, j;
715 /* An empty child */
716 if (node == NULL)
717 continue;
719 /* A leaf or an internal node with skipped bits */
721 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
722 tn->pos + tn->bits - 1) {
723 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
724 1) == 0)
725 put_child(t, tn, 2*i, node);
726 else
727 put_child(t, tn, 2*i+1, node);
728 continue;
731 /* An internal node with two children */
732 inode = (struct tnode *) node;
734 if (inode->bits == 1) {
735 put_child(t, tn, 2*i, inode->child[0]);
736 put_child(t, tn, 2*i+1, inode->child[1]);
738 tnode_free(inode);
739 continue;
742 /* An internal node with more than two children */
744 /* We will replace this node 'inode' with two new
745 * ones, 'left' and 'right', each with half of the
746 * original children. The two new nodes will have
747 * a position one bit further down the key and this
748 * means that the "significant" part of their keys
749 * (see the discussion near the top of this file)
750 * will differ by one bit, which will be "0" in
751 * left's key and "1" in right's key. Since we are
752 * moving the key position by one step, the bit that
753 * we are moving away from - the bit at position
754 * (inode->pos) - is the one that will differ between
755 * left and right. So... we synthesize that bit in the
756 * two new keys.
757 * The mask 'm' computed in the preallocation loop above is a single
758 * "one" bit at the position (inode->pos)
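		/*
		 * Illustrative example (numbers not from the source): with
		 * KEYLENGTH = 32 and inode->pos = 5, the mask computed during
		 * preallocation above is
		 *	m = (~0U << 31) >> 5 = 0x04000000,
		 * a single one-bit at position 5 counting from the most
		 * significant bit, so 'left' was created with key & ~m
		 * (bit 5 forced to 0) and 'right' with key | m (bit 5
		 * forced to 1).
		 */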
761 /* Use the old key, but set the new significant
762 * bit to zero.
765 left = (struct tnode *) tnode_get_child(tn, 2*i);
766 put_child(t, tn, 2*i, NULL);
768 BUG_ON(!left);
770 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
771 put_child(t, tn, 2*i+1, NULL);
773 BUG_ON(!right);
775 size = tnode_child_length(left);
776 for (j = 0; j < size; j++) {
777 put_child(t, left, j, inode->child[j]);
778 put_child(t, right, j, inode->child[j + size]);
780 put_child(t, tn, 2*i, resize(t, left));
781 put_child(t, tn, 2*i+1, resize(t, right));
783 tnode_free(inode);
785 tnode_free(oldtnode);
786 return tn;
787 nomem:
789 int size = tnode_child_length(tn);
790 int j;
792 for (j = 0; j < size; j++)
793 if (tn->child[j])
794 tnode_free((struct tnode *)tn->child[j]);
796 tnode_free(tn);
798 return ERR_PTR(-ENOMEM);
802 static struct tnode *halve(struct trie *t, struct tnode *tn)
804 struct tnode *oldtnode = tn;
805 struct node *left, *right;
806 int i;
807 int olen = tnode_child_length(tn);
809 pr_debug("In halve\n");
811 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
813 if (!tn)
814 return ERR_PTR(-ENOMEM);
817 * Preallocate and store tnodes before the actual work so we
818 * don't get into an inconsistent state if memory allocation
819 * fails. In case of failure we return the old node and the halve
820 * of the tnode is abandoned.
823 for (i = 0; i < olen; i += 2) {
824 left = tnode_get_child(oldtnode, i);
825 right = tnode_get_child(oldtnode, i+1);
827 /* Two nonempty children */
828 if (left && right) {
829 struct tnode *newn;
831 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
833 if (!newn)
834 goto nomem;
836 put_child(t, tn, i/2, (struct node *)newn);
841 for (i = 0; i < olen; i += 2) {
842 struct tnode *newBinNode;
844 left = tnode_get_child(oldtnode, i);
845 right = tnode_get_child(oldtnode, i+1);
847 /* At least one of the children is empty */
848 if (left == NULL) {
849 if (right == NULL) /* Both are empty */
850 continue;
851 put_child(t, tn, i/2, right);
852 continue;
855 if (right == NULL) {
856 put_child(t, tn, i/2, left);
857 continue;
860 /* Two nonempty children */
861 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
862 put_child(t, tn, i/2, NULL);
863 put_child(t, newBinNode, 0, left);
864 put_child(t, newBinNode, 1, right);
865 put_child(t, tn, i/2, resize(t, newBinNode));
867 tnode_free(oldtnode);
868 return tn;
869 nomem:
871 int size = tnode_child_length(tn);
872 int j;
874 for (j = 0; j < size; j++)
875 if (tn->child[j])
876 tnode_free((struct tnode *)tn->child[j]);
878 tnode_free(tn);
880 return ERR_PTR(-ENOMEM);
884 /* readside must use rcu_read_lock; currently that means the dump
885 routines, via get_fa_head and dump */
887 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
889 struct hlist_head *head = &l->list;
890 struct hlist_node *node;
891 struct leaf_info *li;
893 hlist_for_each_entry_rcu(li, node, head, hlist)
894 if (li->plen == plen)
895 return li;
897 return NULL;
900 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
902 struct leaf_info *li = find_leaf_info(l, plen);
904 if (!li)
905 return NULL;
907 return &li->falh;
910 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
912 struct leaf_info *li = NULL, *last = NULL;
913 struct hlist_node *node;
915 if (hlist_empty(head)) {
916 hlist_add_head_rcu(&new->hlist, head);
917 } else {
918 hlist_for_each_entry(li, node, head, hlist) {
919 if (new->plen > li->plen)
920 break;
922 last = li;
924 if (last)
925 hlist_add_after_rcu(&last->hlist, &new->hlist);
926 else
927 hlist_add_before_rcu(&new->hlist, &li->hlist);
931 /* rcu_read_lock needs to be held by the caller on the read side */
933 static struct leaf *
934 fib_find_node(struct trie *t, u32 key)
936 int pos;
937 struct tnode *tn;
938 struct node *n;
940 pos = 0;
941 n = rcu_dereference(t->trie);
943 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
944 tn = (struct tnode *) n;
946 check_tnode(tn);
948 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
949 pos = tn->pos + tn->bits;
950 n = tnode_get_child_rcu(tn, tkey_extract_bits(key, tn->pos, tn->bits));
951 } else
952 break;
954 /* Case we have found a leaf. Compare prefixes */
956 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
957 return (struct leaf *)n;
959 return NULL;
962 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
964 int wasfull;
965 t_key cindex, key = tn->key;
966 struct tnode *tp;
968 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
969 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
970 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
971 tn = (struct tnode *) resize (t, (struct tnode *)tn);
972 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
974 tp = node_parent((struct node *) tn);
975 if (!tp)
976 break;
977 tn = tp;
980 /* Handle last (top) tnode */
981 if (IS_TNODE(tn))
982 tn = (struct tnode*) resize(t, (struct tnode *)tn);
984 return (struct node*) tn;
987 /* only used from updater-side */
989 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
991 int pos, newpos;
992 struct tnode *tp = NULL, *tn = NULL;
993 struct node *n;
994 struct leaf *l;
995 int missbit;
996 struct list_head *fa_head = NULL;
997 struct leaf_info *li;
998 t_key cindex;
1000 pos = 0;
1001 n = t->trie;
1003 /* If we point to NULL, stop. Either the tree is empty and we should
1004 * just put a new leaf in it, or we have reached an empty child slot,
1005 * and we should just put our new leaf in that.
1006 * If we point to a T_TNODE, check if it matches our key. Note that
1007 * a T_TNODE might be skipping any number of bits - its 'pos' need
1008 * not be the parent's 'pos'+'bits'!
1010 * If it does match the current key, get pos/bits from it, extract
1011 * the index from our key, push the T_TNODE and walk the tree.
1013 * If it doesn't, we have to replace it with a new T_TNODE.
1015 * If we point to a T_LEAF, it might or might not have the same key
1016 * as we do. If it does, just update the T_LEAF's value and return
1017 * it.
1018 * If it doesn't, we need to replace it with a T_TNODE.
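	/*
	 * Illustrative walk-through (made-up prefixes, not from the source):
	 * inserting the first route into an empty trie creates a 1-bit root
	 * tnode at pos 0 with the new leaf in one child slot.  Inserting
	 * 10.128.0.0/9 when a leaf for 10.0.0.0/8 already exists takes case 3
	 * below: the keys 0x0a000000 and 0x0a800000 first differ at bit 8, so
	 * a new tnode with pos = 8, bits = 1 is created, the existing leaf
	 * goes into child[0], the new leaf into child[1], and the new tnode
	 * is hooked into the old leaf's slot before the trie is rebalanced.
	 */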
1021 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1022 tn = (struct tnode *) n;
1024 check_tnode(tn);
1026 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1027 tp = tn;
1028 pos = tn->pos + tn->bits;
1029 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1031 BUG_ON(n && node_parent(n) != tn);
1032 } else
1033 break;
1037 * n ----> NULL, LEAF or TNODE
1039 * tp is n's (parent) ----> NULL or TNODE
1042 BUG_ON(tp && IS_LEAF(tp));
1044 /* Case 1: n is a leaf. Compare prefixes */
1046 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1047 l = (struct leaf *) n;
1048 li = leaf_info_new(plen);
1050 if (!li)
1051 return NULL;
1053 fa_head = &li->falh;
1054 insert_leaf_info(&l->list, li);
1055 goto done;
1057 l = leaf_new();
1059 if (!l)
1060 return NULL;
1062 l->key = key;
1063 li = leaf_info_new(plen);
1065 if (!li) {
1066 tnode_free((struct tnode *) l);
1067 return NULL;
1070 fa_head = &li->falh;
1071 insert_leaf_info(&l->list, li);
1073 if (t->trie && n == NULL) {
1074 /* Case 2: n is NULL, and will just insert a new leaf */
1076 node_set_parent((struct node *)l, tp);
1078 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1079 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1080 } else {
1081 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1083 * Add a new tnode here
1084 * the first tnode needs some special handling
1087 if (tp)
1088 pos = tp->pos+tp->bits;
1089 else
1090 pos = 0;
1092 if (n) {
1093 newpos = tkey_mismatch(key, pos, n->key);
1094 tn = tnode_new(n->key, newpos, 1);
1095 } else {
1096 newpos = 0;
1097 tn = tnode_new(key, newpos, 1); /* First tnode */
1100 if (!tn) {
1101 free_leaf_info(li);
1102 tnode_free((struct tnode *) l);
1103 return NULL;
1106 node_set_parent((struct node *)tn, tp);
1108 missbit = tkey_extract_bits(key, newpos, 1);
1109 put_child(t, tn, missbit, (struct node *)l);
1110 put_child(t, tn, 1-missbit, n);
1112 if (tp) {
1113 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1114 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1115 } else {
1116 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1117 tp = tn;
1121 if (tp && tp->pos + tp->bits > 32)
1122 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1123 tp, tp->pos, tp->bits, key, plen);
1125 /* Rebalance the trie */
1127 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1128 done:
1129 return fa_head;
1133 * Caller must hold RTNL.
1135 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1137 struct trie *t = (struct trie *) tb->tb_data;
1138 struct fib_alias *fa, *new_fa;
1139 struct list_head *fa_head = NULL;
1140 struct fib_info *fi;
1141 int plen = cfg->fc_dst_len;
1142 u8 tos = cfg->fc_tos;
1143 u32 key, mask;
1144 int err;
1145 struct leaf *l;
1147 if (plen > 32)
1148 return -EINVAL;
1150 key = ntohl(cfg->fc_dst);
1152 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1154 mask = ntohl(inet_make_mask(plen));
1156 if (key & ~mask)
1157 return -EINVAL;
1159 key = key & mask;
1161 fi = fib_create_info(cfg);
1162 if (IS_ERR(fi)) {
1163 err = PTR_ERR(fi);
1164 goto err;
1167 l = fib_find_node(t, key);
1168 fa = NULL;
1170 if (l) {
1171 fa_head = get_fa_head(l, plen);
1172 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1175 /* Now fa, if non-NULL, points to the first fib alias
1176 * with the same keys [prefix,tos,priority], if such key already
1177 * exists or to the node before which we will insert new one.
1179 * If fa is NULL, we will need to allocate a new one and
1180 * insert to the head of f.
1182 * If f is NULL, no fib node matched the destination key
1183 * and we need to allocate a new one of those as well.
1186 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1187 struct fib_alias *fa_orig;
1189 err = -EEXIST;
1190 if (cfg->fc_nlflags & NLM_F_EXCL)
1191 goto out;
1193 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1194 struct fib_info *fi_drop;
1195 u8 state;
1197 if (fi->fib_treeref > 1)
1198 goto out;
1200 err = -ENOBUFS;
1201 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1202 if (new_fa == NULL)
1203 goto out;
1205 fi_drop = fa->fa_info;
1206 new_fa->fa_tos = fa->fa_tos;
1207 new_fa->fa_info = fi;
1208 new_fa->fa_type = cfg->fc_type;
1209 new_fa->fa_scope = cfg->fc_scope;
1210 state = fa->fa_state;
1211 new_fa->fa_state &= ~FA_S_ACCESSED;
1213 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1214 alias_free_mem_rcu(fa);
1216 fib_release_info(fi_drop);
1217 if (state & FA_S_ACCESSED)
1218 rt_cache_flush(-1);
1219 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1220 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1222 goto succeeded;
1224 /* Error if we find a perfect match which
1225 * uses the same scope, type, and nexthop
1226 * information.
1228 fa_orig = fa;
1229 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1230 if (fa->fa_tos != tos)
1231 break;
1232 if (fa->fa_info->fib_priority != fi->fib_priority)
1233 break;
1234 if (fa->fa_type == cfg->fc_type &&
1235 fa->fa_scope == cfg->fc_scope &&
1236 fa->fa_info == fi) {
1237 goto out;
1240 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1241 fa = fa_orig;
1243 err = -ENOENT;
1244 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1245 goto out;
1247 err = -ENOBUFS;
1248 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1249 if (new_fa == NULL)
1250 goto out;
1252 new_fa->fa_info = fi;
1253 new_fa->fa_tos = tos;
1254 new_fa->fa_type = cfg->fc_type;
1255 new_fa->fa_scope = cfg->fc_scope;
1256 new_fa->fa_state = 0;
1258 * Insert new entry to the list.
1261 if (!fa_head) {
1262 fa_head = fib_insert_node(t, key, plen);
1263 if (unlikely(!fa_head)) {
1264 err = -ENOMEM;
1265 goto out_free_new_fa;
1269 list_add_tail_rcu(&new_fa->fa_list,
1270 (fa ? &fa->fa_list : fa_head));
1272 t->size++;
1274 rt_cache_flush(-1);
1275 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1276 &cfg->fc_nlinfo, 0);
1277 succeeded:
1278 return 0;
1280 out_free_new_fa:
1281 kmem_cache_free(fn_alias_kmem, new_fa);
1282 out:
1283 fib_release_info(fi);
1284 err:
1285 return err;
1289 /* should be called with rcu_read_lock */
1290 static inline int check_leaf(struct trie *t, struct leaf *l,
1291 t_key key, int *plen, const struct flowi *flp,
1292 struct fib_result *res)
1294 int err, i;
1295 __be32 mask;
1296 struct leaf_info *li;
1297 struct hlist_head *hhead = &l->list;
1298 struct hlist_node *node;
1300 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1301 i = li->plen;
1302 mask = inet_make_mask(i);
1303 if (l->key != (key & ntohl(mask)))
1304 continue;
1306 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1307 *plen = i;
1308 #ifdef CONFIG_IP_FIB_TRIE_STATS
1309 t->stats.semantic_match_passed++;
1310 #endif
1311 return err;
1313 #ifdef CONFIG_IP_FIB_TRIE_STATS
1314 t->stats.semantic_match_miss++;
1315 #endif
1317 return 1;
1320 static int
1321 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1323 struct trie *t = (struct trie *) tb->tb_data;
1324 int plen, ret = 0;
1325 struct node *n;
1326 struct tnode *pn;
1327 int pos, bits;
1328 t_key key = ntohl(flp->fl4_dst);
1329 int chopped_off;
1330 t_key cindex = 0;
1331 int current_prefix_length = KEYLENGTH;
1332 struct tnode *cn;
1333 t_key node_prefix, key_prefix, pref_mismatch;
1334 int mp;
1336 rcu_read_lock();
1338 n = rcu_dereference(t->trie);
1339 if (!n)
1340 goto failed;
1342 #ifdef CONFIG_IP_FIB_TRIE_STATS
1343 t->stats.gets++;
1344 #endif
1346 /* Just a leaf? */
1347 if (IS_LEAF(n)) {
1348 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1349 goto found;
1350 goto failed;
1352 pn = (struct tnode *) n;
1353 chopped_off = 0;
1355 while (pn) {
1356 pos = pn->pos;
1357 bits = pn->bits;
1359 if (!chopped_off)
1360 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1361 pos, bits);
1363 n = tnode_get_child(pn, cindex);
1365 if (n == NULL) {
1366 #ifdef CONFIG_IP_FIB_TRIE_STATS
1367 t->stats.null_node_hit++;
1368 #endif
1369 goto backtrace;
1372 if (IS_LEAF(n)) {
1373 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1374 goto found;
1375 else
1376 goto backtrace;
1379 #define HL_OPTIMIZE
1380 #ifdef HL_OPTIMIZE
1381 cn = (struct tnode *)n;
1384 * It's a tnode, and we can do some extra checks here if we
1385 * like, to avoid descending into a dead-end branch.
1386 * This tnode is in the parent's child array at index
1387 * key[p_pos..p_pos+p_bits] but potentially with some bits
1388 * chopped off, so in reality the index may be just a
1389 * subprefix, padded with zero at the end.
1390 * We can also take a look at any skipped bits in this
1391 * tnode - everything up to p_pos is supposed to be ok,
1392 * and the non-chopped bits of the index (see previous
1393 * paragraph) are also guaranteed ok, but the rest is
1394 * considered unknown.
1396 * The skipped bits are key[pos+bits..cn->pos].
1399 /* If current_prefix_length < pos+bits, we are already doing
1400 * actual prefix matching, which means everything from
1401 * pos+(bits-chopped_off) onward must be zero along some
1402 * branch of this subtree - otherwise there is *no* valid
1403 * prefix present. Here we can only check the skipped
1404 * bits. Remember, since we have already indexed into the
1405 * parent's child array, we know that the bits we chopped off
1406 * *are* zero.
1409 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1411 if (current_prefix_length < pos+bits) {
1412 if (tkey_extract_bits(cn->key, current_prefix_length,
1413 cn->pos - current_prefix_length) != 0 ||
1414 !(cn->child[0]))
1415 goto backtrace;
1419 * If chopped_off=0, the index is fully validated and we
1420 * only need to look at the skipped bits for this, the new,
1421 * tnode. What we actually want to do is to find out if
1422 * these skipped bits match our key perfectly, or if we will
1423 * have to count on finding a matching prefix further down,
1424 * because if we do, we would like to have some way of
1425 * verifying the existence of such a prefix at this point.
1428 /* The only thing we can do at this point is to verify that
1429 * any such matching prefix can indeed be a prefix to our
1430 * key, and if the bits in the node we are inspecting that
1431 * do not match our key are not ZERO, this cannot be true.
1432 * Thus, find out where there is a mismatch (before cn->pos)
1433 * and verify that all the mismatching bits are zero in the
1434 * new tnode's key.
1437 /* Note: We aren't very concerned about the piece of the key
1438 * that precede pn->pos+pn->bits, since these have already been
1439 * checked. The bits after cn->pos aren't checked since these are
1440 * by definition "unknown" at this point. Thus, what we want to
1441 * see is if we are about to enter the "prefix matching" state,
1442 * and in that case verify that the skipped bits that will prevail
1443 * throughout this subtree are zero, as they have to be if we are
1444 * to find a matching prefix.
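		/*
		 * Illustrative example (made-up values): say cn->pos = 12 and
		 * the search key first differs from cn->key at bit 9.  If
		 * bits 9..11 of cn->key are all zero, a shorter prefix of the
		 * key may still exist below cn, so we keep descending and, if
		 * it was not already shorter, lower current_prefix_length to
		 * 9.  If any of those bits in cn->key were one, no prefix of
		 * the key could live in this subtree and we backtrack
		 * instead.
		 */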
1447 node_prefix = mask_pfx(cn->key, cn->pos);
1448 key_prefix = mask_pfx(key, cn->pos);
1449 pref_mismatch = key_prefix^node_prefix;
1450 mp = 0;
1452 /* In short: If skipped bits in this node do not match the search
1453 * key, enter the "prefix matching" state directly.
1455 if (pref_mismatch) {
1456 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1457 mp++;
1458 pref_mismatch = pref_mismatch <<1;
1460 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1462 if (key_prefix != 0)
1463 goto backtrace;
1465 if (current_prefix_length >= cn->pos)
1466 current_prefix_length = mp;
1468 #endif
1469 pn = (struct tnode *)n; /* Descend */
1470 chopped_off = 0;
1471 continue;
1473 backtrace:
1474 chopped_off++;
1476 /* Chopped-off bits that are zero do not change the child key (cindex) */
1477 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1478 chopped_off++;
1480 /* Decrease current_... with bits chopped off */
1481 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1482 current_prefix_length = pn->pos + pn->bits - chopped_off;
1485 * Either we do the actual chop off, or, if we have chopped off
1486 * all bits in this tnode, we walk up to our parent.
1489 if (chopped_off <= pn->bits) {
1490 cindex &= ~(1 << (chopped_off-1));
1491 } else {
1492 struct tnode *parent = node_parent((struct node *) pn);
1493 if (!parent)
1494 goto failed;
1496 /* Get Child's index */
1497 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1498 pn = parent;
1499 chopped_off = 0;
1501 #ifdef CONFIG_IP_FIB_TRIE_STATS
1502 t->stats.backtrack++;
1503 #endif
1504 goto backtrace;
1507 failed:
1508 ret = 1;
1509 found:
1510 rcu_read_unlock();
1511 return ret;
1514 /* only called from updater side */
1515 static int trie_leaf_remove(struct trie *t, t_key key)
1517 t_key cindex;
1518 struct tnode *tp = NULL;
1519 struct node *n = t->trie;
1520 struct leaf *l;
1522 pr_debug("entering trie_leaf_remove(%p)\n", n);
1524 /* Note that in the case of skipped bits, those bits are *not* checked!
1525 * When we finish this, we will have NULL or a T_LEAF, and the
1526 * T_LEAF may or may not match our key.
1529 while (n != NULL && IS_TNODE(n)) {
1530 struct tnode *tn = (struct tnode *) n;
1531 check_tnode(tn);
1532 n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits));
1534 BUG_ON(n && node_parent(n) != tn);
1536 l = (struct leaf *) n;
1538 if (!n || !tkey_equals(l->key, key))
1539 return 0;
1542 * Key found.
1543 * Remove the leaf and rebalance the tree
1546 t->size--;
1548 tp = node_parent(n);
1549 tnode_free((struct tnode *) n);
1551 if (tp) {
1552 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1553 put_child(t, (struct tnode *)tp, cindex, NULL);
1554 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1555 } else
1556 rcu_assign_pointer(t->trie, NULL);
1558 return 1;
1562 * Caller must hold RTNL.
1564 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1566 struct trie *t = (struct trie *) tb->tb_data;
1567 u32 key, mask;
1568 int plen = cfg->fc_dst_len;
1569 u8 tos = cfg->fc_tos;
1570 struct fib_alias *fa, *fa_to_delete;
1571 struct list_head *fa_head;
1572 struct leaf *l;
1573 struct leaf_info *li;
1575 if (plen > 32)
1576 return -EINVAL;
1578 key = ntohl(cfg->fc_dst);
1579 mask = ntohl(inet_make_mask(plen));
1581 if (key & ~mask)
1582 return -EINVAL;
1584 key = key & mask;
1585 l = fib_find_node(t, key);
1587 if (!l)
1588 return -ESRCH;
1590 fa_head = get_fa_head(l, plen);
1591 fa = fib_find_alias(fa_head, tos, 0);
1593 if (!fa)
1594 return -ESRCH;
1596 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1598 fa_to_delete = NULL;
1599 fa_head = fa->fa_list.prev;
1601 list_for_each_entry(fa, fa_head, fa_list) {
1602 struct fib_info *fi = fa->fa_info;
1604 if (fa->fa_tos != tos)
1605 break;
1607 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1608 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1609 fa->fa_scope == cfg->fc_scope) &&
1610 (!cfg->fc_protocol ||
1611 fi->fib_protocol == cfg->fc_protocol) &&
1612 fib_nh_match(cfg, fi) == 0) {
1613 fa_to_delete = fa;
1614 break;
1618 if (!fa_to_delete)
1619 return -ESRCH;
1621 fa = fa_to_delete;
1622 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1623 &cfg->fc_nlinfo, 0);
1625 l = fib_find_node(t, key);
1626 li = find_leaf_info(l, plen);
1628 list_del_rcu(&fa->fa_list);
1630 if (list_empty(fa_head)) {
1631 hlist_del_rcu(&li->hlist);
1632 free_leaf_info(li);
1635 if (hlist_empty(&l->list))
1636 trie_leaf_remove(t, key);
1638 if (fa->fa_state & FA_S_ACCESSED)
1639 rt_cache_flush(-1);
1641 fib_release_info(fa->fa_info);
1642 alias_free_mem_rcu(fa);
1643 return 0;
1646 static int trie_flush_list(struct trie *t, struct list_head *head)
1648 struct fib_alias *fa, *fa_node;
1649 int found = 0;
1651 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1652 struct fib_info *fi = fa->fa_info;
1654 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1655 list_del_rcu(&fa->fa_list);
1656 fib_release_info(fa->fa_info);
1657 alias_free_mem_rcu(fa);
1658 found++;
1661 return found;
1664 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1666 int found = 0;
1667 struct hlist_head *lih = &l->list;
1668 struct hlist_node *node, *tmp;
1669 struct leaf_info *li = NULL;
1671 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1672 found += trie_flush_list(t, &li->falh);
1674 if (list_empty(&li->falh)) {
1675 hlist_del_rcu(&li->hlist);
1676 free_leaf_info(li);
1679 return found;
1682 /* rcu_read_lock needs to be held by the caller on the read side */
1684 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1686 struct node *c = (struct node *) thisleaf;
1687 struct tnode *p;
1688 int idx;
1689 struct node *trie = rcu_dereference(t->trie);
1691 if (c == NULL) {
1692 if (trie == NULL)
1693 return NULL;
1695 if (IS_LEAF(trie)) /* trie w. just a leaf */
1696 return (struct leaf *) trie;
1698 p = (struct tnode*) trie; /* Start */
1699 } else
1700 p = node_parent_rcu(c);
1702 while (p) {
1703 int pos, last;
1705 /* Find the next child of the parent */
1706 if (c)
1707 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1708 else
1709 pos = 0;
1711 last = 1 << p->bits;
1712 for (idx = pos; idx < last ; idx++) {
1713 c = rcu_dereference(p->child[idx]);
1715 if (!c)
1716 continue;
1718 /* Descend if tnode */
1719 while (IS_TNODE(c)) {
1720 p = (struct tnode *) c;
1721 idx = 0;
1723 /* Leftmost non-NULL branch */
1724 if (p && IS_TNODE(p))
1725 while (!(c = rcu_dereference(p->child[idx]))
1726 && idx < (1<<p->bits)) idx++;
1728 /* Done with this tnode? */
1729 if (idx >= (1 << p->bits) || !c)
1730 goto up;
1732 return (struct leaf *) c;
1735 /* No more children go up one step */
1736 c = (struct node *) p;
1737 p = node_parent_rcu(c);
1739 return NULL; /* Done. Reached the root of the trie */
1743 * Caller must hold RTNL.
1745 static int fn_trie_flush(struct fib_table *tb)
1747 struct trie *t = (struct trie *) tb->tb_data;
1748 struct leaf *ll = NULL, *l = NULL;
1749 int found = 0, h;
1751 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1752 found += trie_flush_leaf(t, l);
1754 if (ll && hlist_empty(&ll->list))
1755 trie_leaf_remove(t, ll->key);
1756 ll = l;
1759 if (ll && hlist_empty(&ll->list))
1760 trie_leaf_remove(t, ll->key);
1762 pr_debug("trie_flush found=%d\n", found);
1763 return found;
1766 static void
1767 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1769 struct trie *t = (struct trie *) tb->tb_data;
1770 int order, last_idx;
1771 struct fib_info *fi = NULL;
1772 struct fib_info *last_resort;
1773 struct fib_alias *fa = NULL;
1774 struct list_head *fa_head;
1775 struct leaf *l;
1777 last_idx = -1;
1778 last_resort = NULL;
1779 order = -1;
1781 rcu_read_lock();
1783 l = fib_find_node(t, 0);
1784 if (!l)
1785 goto out;
1787 fa_head = get_fa_head(l, 0);
1788 if (!fa_head)
1789 goto out;
1791 if (list_empty(fa_head))
1792 goto out;
1794 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1795 struct fib_info *next_fi = fa->fa_info;
1797 if (fa->fa_scope != res->scope ||
1798 fa->fa_type != RTN_UNICAST)
1799 continue;
1801 if (next_fi->fib_priority > res->fi->fib_priority)
1802 break;
1803 if (!next_fi->fib_nh[0].nh_gw ||
1804 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1805 continue;
1806 fa->fa_state |= FA_S_ACCESSED;
1808 if (fi == NULL) {
1809 if (next_fi != res->fi)
1810 break;
1811 } else if (!fib_detect_death(fi, order, &last_resort,
1812 &last_idx, tb->tb_default)) {
1813 fib_result_assign(res, fi);
1814 tb->tb_default = order;
1815 goto out;
1817 fi = next_fi;
1818 order++;
1820 if (order <= 0 || fi == NULL) {
1821 tb->tb_default = -1;
1822 goto out;
1825 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1826 tb->tb_default)) {
1827 fib_result_assign(res, fi);
1828 tb->tb_default = order;
1829 goto out;
1831 if (last_idx >= 0)
1832 fib_result_assign(res, last_resort);
1833 tb->tb_default = last_idx;
1834 out:
1835 rcu_read_unlock();
1838 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1839 struct sk_buff *skb, struct netlink_callback *cb)
1841 int i, s_i;
1842 struct fib_alias *fa;
1844 __be32 xkey = htonl(key);
1846 s_i = cb->args[4];
1847 i = 0;
1849 /* rcu_read_lock is held by caller */
1851 list_for_each_entry_rcu(fa, fah, fa_list) {
1852 if (i < s_i) {
1853 i++;
1854 continue;
1856 BUG_ON(!fa->fa_info);
1858 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1859 cb->nlh->nlmsg_seq,
1860 RTM_NEWROUTE,
1861 tb->tb_id,
1862 fa->fa_type,
1863 fa->fa_scope,
1864 xkey,
1865 plen,
1866 fa->fa_tos,
1867 fa->fa_info, 0) < 0) {
1868 cb->args[4] = i;
1869 return -1;
1871 i++;
1873 cb->args[4] = i;
1874 return skb->len;
1877 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1878 struct netlink_callback *cb)
1880 int h, s_h;
1881 struct list_head *fa_head;
1882 struct leaf *l = NULL;
1884 s_h = cb->args[3];
1886 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1887 if (h < s_h)
1888 continue;
1889 if (h > s_h)
1890 memset(&cb->args[4], 0,
1891 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1893 fa_head = get_fa_head(l, plen);
1895 if (!fa_head)
1896 continue;
1898 if (list_empty(fa_head))
1899 continue;
1901 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1902 cb->args[3] = h;
1903 return -1;
1906 cb->args[3] = h;
1907 return skb->len;
1910 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1912 int m, s_m;
1913 struct trie *t = (struct trie *) tb->tb_data;
1915 s_m = cb->args[2];
1917 rcu_read_lock();
1918 for (m = 0; m <= 32; m++) {
1919 if (m < s_m)
1920 continue;
1921 if (m > s_m)
1922 memset(&cb->args[3], 0,
1923 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1925 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1926 cb->args[2] = m;
1927 goto out;
1930 rcu_read_unlock();
1931 cb->args[2] = m;
1932 return skb->len;
1933 out:
1934 rcu_read_unlock();
1935 return -1;
1938 void __init fib_hash_init(void)
1940 fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
1941 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1945 /* Fix more generic FIB names for init later */
1946 struct fib_table *fib_hash_table(u32 id)
1948 struct fib_table *tb;
1949 struct trie *t;
1951 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1952 GFP_KERNEL);
1953 if (tb == NULL)
1954 return NULL;
1956 tb->tb_id = id;
1957 tb->tb_default = -1;
1958 tb->tb_lookup = fn_trie_lookup;
1959 tb->tb_insert = fn_trie_insert;
1960 tb->tb_delete = fn_trie_delete;
1961 tb->tb_flush = fn_trie_flush;
1962 tb->tb_select_default = fn_trie_select_default;
1963 tb->tb_dump = fn_trie_dump;
1965 t = (struct trie *) tb->tb_data;
1966 memset(t, 0, sizeof(*t));
1968 if (id == RT_TABLE_LOCAL)
1969 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
1971 return tb;
1974 #ifdef CONFIG_PROC_FS
1975 /* Depth first Trie walk iterator */
1976 struct fib_trie_iter {
1977 struct seq_net_private p;
1978 struct trie *trie_local, *trie_main;
1979 struct tnode *tnode;
1980 struct trie *trie;
1981 unsigned index;
1982 unsigned depth;
1985 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
1987 struct tnode *tn = iter->tnode;
1988 unsigned cindex = iter->index;
1989 struct tnode *p;
1991 /* A single entry routing table */
1992 if (!tn)
1993 return NULL;
1995 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1996 iter->tnode, iter->index, iter->depth);
1997 rescan:
1998 while (cindex < (1<<tn->bits)) {
1999 struct node *n = tnode_get_child_rcu(tn, cindex);
2001 if (n) {
2002 if (IS_LEAF(n)) {
2003 iter->tnode = tn;
2004 iter->index = cindex + 1;
2005 } else {
2006 /* push down one level */
2007 iter->tnode = (struct tnode *) n;
2008 iter->index = 0;
2009 ++iter->depth;
2011 return n;
2014 ++cindex;
2017 /* Current node exhausted, pop back up */
2018 p = node_parent_rcu((struct node *)tn);
2019 if (p) {
2020 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2021 tn = p;
2022 --iter->depth;
2023 goto rescan;
2026 /* got root? */
2027 return NULL;
2030 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2031 struct trie *t)
2033 struct node *n ;
2035 if (!t)
2036 return NULL;
2038 n = rcu_dereference(t->trie);
2040 if (!iter)
2041 return NULL;
2043 if (n) {
2044 if (IS_TNODE(n)) {
2045 iter->tnode = (struct tnode *) n;
2046 iter->trie = t;
2047 iter->index = 0;
2048 iter->depth = 1;
2049 } else {
2050 iter->tnode = NULL;
2051 iter->trie = t;
2052 iter->index = 0;
2053 iter->depth = 0;
2055 return n;
2057 return NULL;
2060 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2062 struct node *n;
2063 struct fib_trie_iter iter;
2065 memset(s, 0, sizeof(*s));
2067 rcu_read_lock();
2068 for (n = fib_trie_get_first(&iter, t); n;
2069 n = fib_trie_get_next(&iter)) {
2070 if (IS_LEAF(n)) {
2071 s->leaves++;
2072 s->totdepth += iter.depth;
2073 if (iter.depth > s->maxdepth)
2074 s->maxdepth = iter.depth;
2075 } else {
2076 const struct tnode *tn = (const struct tnode *) n;
2077 int i;
2079 s->tnodes++;
2080 if (tn->bits < MAX_STAT_DEPTH)
2081 s->nodesizes[tn->bits]++;
2083 for (i = 0; i < (1<<tn->bits); i++)
2084 if (!tn->child[i])
2085 s->nullpointers++;
2088 rcu_read_unlock();
2092 * This outputs /proc/net/fib_triestat
2094 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2096 unsigned i, max, pointers, bytes, avdepth;
2098 if (stat->leaves)
2099 avdepth = stat->totdepth*100 / stat->leaves;
2100 else
2101 avdepth = 0;
2103 seq_printf(seq, "\tAver depth: %u.%02d\n", avdepth / 100, avdepth % 100 );
2104 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2106 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2108 bytes = sizeof(struct leaf) * stat->leaves;
2109 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
2110 bytes += sizeof(struct tnode) * stat->tnodes;
2112 max = MAX_STAT_DEPTH;
2113 while (max > 0 && stat->nodesizes[max-1] == 0)
2114 max--;
2116 pointers = 0;
2117 for (i = 1; i <= max; i++)
2118 if (stat->nodesizes[i] != 0) {
2119 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2120 pointers += (1<<i) * stat->nodesizes[i];
2122 seq_putc(seq, '\n');
2123 seq_printf(seq, "\tPointers: %u\n", pointers);
2125 bytes += sizeof(struct node *) * pointers;
2126 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2127 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
2130 #ifdef CONFIG_IP_FIB_TRIE_STATS
2131 static void trie_show_usage(struct seq_file *seq,
2132 const struct trie_use_stats *stats)
2134 seq_printf(seq, "\nCounters:\n---------\n");
2135 seq_printf(seq,"gets = %u\n", stats->gets);
2136 seq_printf(seq,"backtracks = %u\n", stats->backtrack);
2137 seq_printf(seq,"semantic match passed = %u\n", stats->semantic_match_passed);
2138 seq_printf(seq,"semantic match miss = %u\n", stats->semantic_match_miss);
2139 seq_printf(seq,"null node hit= %u\n", stats->null_node_hit);
2140 seq_printf(seq,"skipped node resize = %u\n\n", stats->resize_node_skipped);
2142 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2144 static void fib_trie_show(struct seq_file *seq, const char *name, struct trie *trie)
2146 struct trie_stat stat;
2148 seq_printf(seq, "%s: %d\n", name, trie->size);
2149 trie_collect_stats(trie, &stat);
2150 trie_show_stats(seq, &stat);
2151 #ifdef CONFIG_IP_FIB_TRIE_STATS
2152 trie_show_usage(seq, &trie->stats);
2153 #endif
2156 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2158 struct net *net = (struct net *)seq->private;
2159 struct fib_table *tb;
2161 seq_printf(seq,
2162 "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2163 sizeof(struct leaf), sizeof(struct tnode));
2165 tb = fib_get_table(net, RT_TABLE_LOCAL);
2166 if (tb)
2167 fib_trie_show(seq, "Local", (struct trie *) tb->tb_data);
2169 tb = fib_get_table(net, RT_TABLE_MAIN);
2170 if (tb)
2171 fib_trie_show(seq, "Main", (struct trie *) tb->tb_data);
2173 return 0;
2176 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2178 int err;
2179 struct net *net;
2181 net = get_proc_net(inode);
2182 if (net == NULL)
2183 return -ENXIO;
2184 err = single_open(file, fib_triestat_seq_show, net);
2185 if (err < 0) {
2186 put_net(net);
2187 return err;
2189 return 0;
2192 static int fib_triestat_seq_release(struct inode *ino, struct file *f)
2194 struct seq_file *seq = f->private_data;
2195 put_net(seq->private);
2196 return single_release(ino, f);
2199 static const struct file_operations fib_triestat_fops = {
2200 .owner = THIS_MODULE,
2201 .open = fib_triestat_seq_open,
2202 .read = seq_read,
2203 .llseek = seq_lseek,
2204 .release = fib_triestat_seq_release,
2207 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2208 loff_t pos)
2210 loff_t idx = 0;
2211 struct node *n;
2213 for (n = fib_trie_get_first(iter, iter->trie_local);
2214 n; ++idx, n = fib_trie_get_next(iter)) {
2215 if (pos == idx)
2216 return n;
2219 for (n = fib_trie_get_first(iter, iter->trie_main);
2220 n; ++idx, n = fib_trie_get_next(iter)) {
2221 if (pos == idx)
2222 return n;
2224 return NULL;
2227 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2228 __acquires(RCU)
2230 struct fib_trie_iter *iter = seq->private;
2231 struct fib_table *tb;
2233 if (!iter->trie_local) {
2234 tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
2235 if (tb)
2236 iter->trie_local = (struct trie *) tb->tb_data;
2238 if (!iter->trie_main) {
2239 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
2240 if (tb)
2241 iter->trie_main = (struct trie *) tb->tb_data;
2243 rcu_read_lock();
2244 if (*pos == 0)
2245 return SEQ_START_TOKEN;
2246 return fib_trie_get_idx(iter, *pos - 1);
2249 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2251 struct fib_trie_iter *iter = seq->private;
2252 void *l = v;
2254 ++*pos;
2255 if (v == SEQ_START_TOKEN)
2256 return fib_trie_get_idx(iter, 0);
2258 v = fib_trie_get_next(iter);
2259 BUG_ON(v == l);
2260 if (v)
2261 return v;
2263 /* continue scan in next trie */
2264 if (iter->trie == iter->trie_local)
2265 return fib_trie_get_first(iter, iter->trie_main);
2267 return NULL;
2270 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2271 __releases(RCU)
2273 rcu_read_unlock();
2276 static void seq_indent(struct seq_file *seq, int n)
2278 while (n-- > 0) seq_puts(seq, " ");
2281 static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
2283 switch (s) {
2284 case RT_SCOPE_UNIVERSE: return "universe";
2285 case RT_SCOPE_SITE: return "site";
2286 case RT_SCOPE_LINK: return "link";
2287 case RT_SCOPE_HOST: return "host";
2288 case RT_SCOPE_NOWHERE: return "nowhere";
2289 default:
2290 snprintf(buf, len, "scope=%d", s);
2291 return buf;
2295 static const char *rtn_type_names[__RTN_MAX] = {
2296 [RTN_UNSPEC] = "UNSPEC",
2297 [RTN_UNICAST] = "UNICAST",
2298 [RTN_LOCAL] = "LOCAL",
2299 [RTN_BROADCAST] = "BROADCAST",
2300 [RTN_ANYCAST] = "ANYCAST",
2301 [RTN_MULTICAST] = "MULTICAST",
2302 [RTN_BLACKHOLE] = "BLACKHOLE",
2303 [RTN_UNREACHABLE] = "UNREACHABLE",
2304 [RTN_PROHIBIT] = "PROHIBIT",
2305 [RTN_THROW] = "THROW",
2306 [RTN_NAT] = "NAT",
2307 [RTN_XRESOLVE] = "XRESOLVE",
2310 static inline const char *rtn_type(char *buf, size_t len, unsigned t)
2312 if (t < __RTN_MAX && rtn_type_names[t])
2313 return rtn_type_names[t];
2314 snprintf(buf, len, "type %u", t);
2315 return buf;
2318 /* Pretty print the trie */
2319 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2321 const struct fib_trie_iter *iter = seq->private;
2322 struct node *n = v;
2324 if (v == SEQ_START_TOKEN)
2325 return 0;
2327 if (!node_parent_rcu(n)) {
2328 if (iter->trie == iter->trie_local)
2329 seq_puts(seq, "<local>:\n");
2330 else
2331 seq_puts(seq, "<main>:\n");
2334 if (IS_TNODE(n)) {
2335 struct tnode *tn = (struct tnode *) n;
2336 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2338 seq_indent(seq, iter->depth-1);
2339 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2340 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2341 tn->empty_children);
2343 } else {
2344 struct leaf *l = (struct leaf *) n;
2345 int i;
2346 __be32 val = htonl(l->key);
2348 seq_indent(seq, iter->depth);
2349 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2350 for (i = 32; i >= 0; i--) {
2351 struct leaf_info *li = find_leaf_info(l, i);
2353 if (li) {
2354 struct fib_alias *fa;
2356 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2357 char buf1[32], buf2[32];
2359 seq_indent(seq, iter->depth+1);
2360 seq_printf(seq, " /%d %s %s", i,
2361 rtn_scope(buf1, sizeof(buf1),
2362 fa->fa_scope),
2363 rtn_type(buf2, sizeof(buf2),
2364 fa->fa_type));
2365 if (fa->fa_tos)
2366 seq_printf(seq, "tos =%d\n",
2367 fa->fa_tos);
2368 seq_putc(seq, '\n');
2374 return 0;
2377 static const struct seq_operations fib_trie_seq_ops = {
2378 .start = fib_trie_seq_start,
2379 .next = fib_trie_seq_next,
2380 .stop = fib_trie_seq_stop,
2381 .show = fib_trie_seq_show,
2384 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2386 return seq_open_net(inode, file, &fib_trie_seq_ops,
2387 sizeof(struct fib_trie_iter));
2390 static const struct file_operations fib_trie_fops = {
2391 .owner = THIS_MODULE,
2392 .open = fib_trie_seq_open,
2393 .read = seq_read,
2394 .llseek = seq_lseek,
2395 .release = seq_release_net,
2398 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2400 static unsigned type2flags[RTN_MAX + 1] = {
2401 [7] = RTF_REJECT, [8] = RTF_REJECT, /* RTN_UNREACHABLE, RTN_PROHIBIT */
2403 unsigned flags = type2flags[type];
2405 if (fi && fi->fib_nh->nh_gw)
2406 flags |= RTF_GATEWAY;
2407 if (mask == htonl(0xFFFFFFFF))
2408 flags |= RTF_HOST;
2409 flags |= RTF_UP;
2410 return flags;
2414 * This outputs /proc/net/route.
2415 * The format of the file is not supposed to be changed
2416 * and needs to be same as fib_hash output to avoid breaking
2417 * legacy utilities
2419 static int fib_route_seq_show(struct seq_file *seq, void *v)
2421 const struct fib_trie_iter *iter = seq->private;
2422 struct leaf *l = v;
2423 int i;
2424 char bf[128];
2426 if (v == SEQ_START_TOKEN) {
2427 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2428 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2429 "\tWindow\tIRTT");
2430 return 0;
2433 if (iter->trie == iter->trie_local)
2434 return 0;
2435 if (IS_TNODE(l))
2436 return 0;
2438 for (i=32; i>=0; i--) {
2439 struct leaf_info *li = find_leaf_info(l, i);
2440 struct fib_alias *fa;
2441 __be32 mask, prefix;
2443 if (!li)
2444 continue;
2446 mask = inet_make_mask(li->plen);
2447 prefix = htonl(l->key);
2449 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2450 const struct fib_info *fi = fa->fa_info;
2451 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2453 if (fa->fa_type == RTN_BROADCAST
2454 || fa->fa_type == RTN_MULTICAST)
2455 continue;
2457 if (fi)
2458 snprintf(bf, sizeof(bf),
2459 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2460 fi->fib_dev ? fi->fib_dev->name : "*",
2461 prefix,
2462 fi->fib_nh->nh_gw, flags, 0, 0,
2463 fi->fib_priority,
2464 mask,
2465 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2466 fi->fib_window,
2467 fi->fib_rtt >> 3);
2468 else
2469 snprintf(bf, sizeof(bf),
2470 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2471 prefix, 0, flags, 0, 0, 0,
2472 mask, 0, 0, 0);
2474 seq_printf(seq, "%-127s\n", bf);
2478 return 0;
2481 static const struct seq_operations fib_route_seq_ops = {
2482 .start = fib_trie_seq_start,
2483 .next = fib_trie_seq_next,
2484 .stop = fib_trie_seq_stop,
2485 .show = fib_route_seq_show,
2488 static int fib_route_seq_open(struct inode *inode, struct file *file)
2490 return seq_open_net(inode, file, &fib_route_seq_ops,
2491 sizeof(struct fib_trie_iter));
2494 static const struct file_operations fib_route_fops = {
2495 .owner = THIS_MODULE,
2496 .open = fib_route_seq_open,
2497 .read = seq_read,
2498 .llseek = seq_lseek,
2499 .release = seq_release_net,
2502 int __net_init fib_proc_init(struct net *net)
2504 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
2505 goto out1;
2507 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
2508 &fib_triestat_fops))
2509 goto out2;
2511 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
2512 goto out3;
2514 return 0;
2516 out3:
2517 proc_net_remove(net, "fib_triestat");
2518 out2:
2519 proc_net_remove(net, "fib_trie");
2520 out1:
2521 return -ENOMEM;
2524 void __net_exit fib_proc_exit(struct net *net)
2526 proc_net_remove(net, "fib_trie");
2527 proc_net_remove(net, "fib_triestat");
2528 proc_net_remove(net, "route");
2531 #endif /* CONFIG_PROC_FS */