net/ipv4/fib_trie.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work comes from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.408"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
77 #include <net/ip.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
80 #include <net/tcp.h>
81 #include <net/sock.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
85 #undef CONFIG_IP_FIB_TRIE_STATS
86 #define MAX_STAT_DEPTH 32
88 #define KEYLENGTH (8*sizeof(t_key))
90 typedef unsigned int t_key;
92 #define T_TNODE 0
93 #define T_LEAF 1
94 #define NODE_TYPE_MASK 0x1UL
95 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
97 #define IS_TNODE(n) (!(n->parent & T_LEAF))
98 #define IS_LEAF(n) (n->parent & T_LEAF)
100 struct node {
101 t_key key;
102 unsigned long parent;
105 struct leaf {
106 t_key key;
107 unsigned long parent;
108 struct hlist_head list;
109 struct rcu_head rcu;
112 struct leaf_info {
113 struct hlist_node hlist;
114 struct rcu_head rcu;
115 int plen;
116 struct list_head falh;
119 struct tnode {
120 t_key key;
121 unsigned long parent;
122 unsigned short pos:5; /* log2(KEYLENGTH) bits needed */
123 unsigned short bits:5; /* log2(KEYLENGTH) bits needed */
124 unsigned short full_children; /* KEYLENGTH bits needed */
125 unsigned short empty_children; /* KEYLENGTH bits needed */
126 struct rcu_head rcu;
127 struct node *child[0];
130 #ifdef CONFIG_IP_FIB_TRIE_STATS
131 struct trie_use_stats {
132 unsigned int gets;
133 unsigned int backtrack;
134 unsigned int semantic_match_passed;
135 unsigned int semantic_match_miss;
136 unsigned int null_node_hit;
137 unsigned int resize_node_skipped;
139 #endif
141 struct trie_stat {
142 unsigned int totdepth;
143 unsigned int maxdepth;
144 unsigned int tnodes;
145 unsigned int leaves;
146 unsigned int nullpointers;
147 unsigned int nodesizes[MAX_STAT_DEPTH];
150 struct trie {
151 struct node *trie;
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
154 #endif
155 int size;
156 unsigned int revision;
159 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
160 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
161 static struct node *resize(struct trie *t, struct tnode *tn);
162 static struct tnode *inflate(struct trie *t, struct tnode *tn);
163 static struct tnode *halve(struct trie *t, struct tnode *tn);
164 static void tnode_free(struct tnode *tn);
166 static struct kmem_cache *fn_alias_kmem __read_mostly;
167 static struct trie *trie_local = NULL, *trie_main = NULL;
169 static inline struct tnode *node_parent(struct node *node)
171 struct tnode *ret;
173 ret = (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
174 return rcu_dereference(ret);
177 static inline void node_set_parent(struct node *node, struct tnode *ptr)
179 rcu_assign_pointer(node->parent,
180 (unsigned long)ptr | NODE_TYPE(node));
183 /* rcu_read_lock must be held by the caller on the read side */
185 static inline struct node *tnode_get_child(struct tnode *tn, int i)
187 BUG_ON(i >= 1 << tn->bits);
189 return rcu_dereference(tn->child[i]);
192 static inline int tnode_child_length(const struct tnode *tn)
194 return 1 << tn->bits;
197 static inline t_key mask_pfx(t_key k, unsigned short l)
199 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
202 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
204 if (offset < KEYLENGTH)
205 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
206 else
207 return 0;
210 static inline int tkey_equals(t_key a, t_key b)
212 return a == b;
215 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
217 if (bits == 0 || offset >= KEYLENGTH)
218 return 1;
219 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
220 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
223 static inline int tkey_mismatch(t_key a, int offset, t_key b)
225 t_key diff = a ^ b;
226 int i = offset;
228 if (!diff)
229 return 0;
230 while ((diff << i) >> (KEYLENGTH-1) == 0)
231 i++;
232 return i;
236 To understand this stuff, an understanding of keys and all their bits is
237 necessary. Every node in the trie has a key associated with it, but not
238 all of the bits in that key are significant.
240 Consider a node 'n' and its parent 'tp'.
242 If n is a leaf, every bit in its key is significant. Its presence is
243 necessitated by path compression, since during a tree traversal (when
244 searching for a leaf - unless we are doing an insertion) we will completely
245 ignore all skipped bits we encounter. Thus we need to verify, at the end of
246 a potentially successful search, that we have indeed been walking the
247 correct key path.
249 Note that we can never "miss" the correct key in the tree if present by
250 following the wrong path. Path compression ensures that segments of the key
251 that are the same for all keys with a given prefix are skipped, but the
252 skipped part *is* identical for each node in the subtrie below the skipped
253 bit! trie_insert() in this implementation takes care of that - note the
254 call to tkey_sub_equals() in trie_insert().
256 If n is an internal node - a 'tnode' here - the various parts of its key
257 have many different meanings.
259 Example:
260 _________________________________________________________________
261 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
262 -----------------------------------------------------------------
263 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
265 _________________________________________________________________
266 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
267 -----------------------------------------------------------------
268 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
270 tp->pos = 7
271 tp->bits = 3
272 n->pos = 15
273 n->bits = 4
275 First, let's just ignore the bits that come before the parent tp, that is
276 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
277 not use them for anything.
279 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
280 index into the parent's child array. That is, they will be used to find
281 'n' among tp's children.
283 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
284 for the node n.
286 All the bits we have seen so far are significant to the node n. The rest
287 of the bits are really not needed or indeed known in n->key.
289 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
290 n's child array, and will of course be different for each child.
293 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
294 at this point.
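/*
 * Illustrative sketch (not part of the original source): the helper below is a
 * hypothetical, unused function showing how tkey_extract_bits() maps the
 * example layout above onto child-array indices, assuming tp->pos = 7,
 * tp->bits = 3, n->pos = 15 and n->bits = 4 as in the diagram.
 */
static inline void tkey_layout_example(t_key key)
{
	/* "N" bits: n's index in tp's child array, bits 7..9 of the key */
	t_key n_index = tkey_extract_bits(key, 7, 3);
	/* "C" bits: index into n's own child array, bits 15..18 of the key */
	t_key c_index = tkey_extract_bits(key, 15, 4);

	(void) n_index;
	(void) c_index;
}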
298 static inline void check_tnode(const struct tnode *tn)
300 WARN_ON(tn && tn->pos+tn->bits > 32);
303 static int halve_threshold = 25;
304 static int inflate_threshold = 50;
305 static int halve_threshold_root = 8;
306 static int inflate_threshold_root = 15;
309 static void __alias_free_mem(struct rcu_head *head)
311 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
312 kmem_cache_free(fn_alias_kmem, fa);
315 static inline void alias_free_mem_rcu(struct fib_alias *fa)
317 call_rcu(&fa->rcu, __alias_free_mem);
320 static void __leaf_free_rcu(struct rcu_head *head)
322 kfree(container_of(head, struct leaf, rcu));
325 static void __leaf_info_free_rcu(struct rcu_head *head)
327 kfree(container_of(head, struct leaf_info, rcu));
330 static inline void free_leaf_info(struct leaf_info *leaf)
332 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
335 static struct tnode *tnode_alloc(unsigned int size)
337 struct page *pages;
339 if (size <= PAGE_SIZE)
340 return kcalloc(size, 1, GFP_KERNEL);
342 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
343 if (!pages)
344 return NULL;
346 return page_address(pages);
349 static void __tnode_free_rcu(struct rcu_head *head)
351 struct tnode *tn = container_of(head, struct tnode, rcu);
352 unsigned int size = sizeof(struct tnode) +
353 (1 << tn->bits) * sizeof(struct node *);
355 if (size <= PAGE_SIZE)
356 kfree(tn);
357 else
358 free_pages((unsigned long)tn, get_order(size));
361 static inline void tnode_free(struct tnode *tn)
363 if (IS_LEAF(tn)) {
364 struct leaf *l = (struct leaf *) tn;
365 call_rcu_bh(&l->rcu, __leaf_free_rcu);
366 } else
367 call_rcu(&tn->rcu, __tnode_free_rcu);
370 static struct leaf *leaf_new(void)
372 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
373 if (l) {
374 l->parent = T_LEAF;
375 INIT_HLIST_HEAD(&l->list);
377 return l;
380 static struct leaf_info *leaf_info_new(int plen)
382 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
383 if (li) {
384 li->plen = plen;
385 INIT_LIST_HEAD(&li->falh);
387 return li;
390 static struct tnode* tnode_new(t_key key, int pos, int bits)
392 int nchildren = 1<<bits;
393 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
394 struct tnode *tn = tnode_alloc(sz);
396 if (tn) {
397 memset(tn, 0, sz);
398 tn->parent = T_TNODE;
399 tn->pos = pos;
400 tn->bits = bits;
401 tn->key = key;
402 tn->full_children = 0;
403 tn->empty_children = 1<<bits;
406 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
407 (unsigned int) (sizeof(struct node) * 1<<bits));
408 return tn;
412 * Check whether a tnode 'n' is "full", i.e. it is an internal node
413 * and no bits are skipped. See discussion in dyntree paper p. 6
416 static inline int tnode_full(const struct tnode *tn, const struct node *n)
418 if (n == NULL || IS_LEAF(n))
419 return 0;
421 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
424 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
426 tnode_put_child_reorg(tn, i, n, -1);
430 * Add a child at position i overwriting the old value.
431 * Update the value of full_children and empty_children.
434 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
436 struct node *chi = tn->child[i];
437 int isfull;
439 BUG_ON(i >= 1<<tn->bits);
442 /* update emptyChildren */
443 if (n == NULL && chi != NULL)
444 tn->empty_children++;
445 else if (n != NULL && chi == NULL)
446 tn->empty_children--;
448 /* update fullChildren */
449 if (wasfull == -1)
450 wasfull = tnode_full(tn, chi);
452 isfull = tnode_full(tn, n);
453 if (wasfull && !isfull)
454 tn->full_children--;
455 else if (!wasfull && isfull)
456 tn->full_children++;
458 if (n)
459 node_set_parent(n, tn);
461 rcu_assign_pointer(tn->child[i], n);
464 static struct node *resize(struct trie *t, struct tnode *tn)
466 int i;
467 int err = 0;
468 struct tnode *old_tn;
469 int inflate_threshold_use;
470 int halve_threshold_use;
471 int max_resize;
473 if (!tn)
474 return NULL;
476 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
477 tn, inflate_threshold, halve_threshold);
479 /* No children */
480 if (tn->empty_children == tnode_child_length(tn)) {
481 tnode_free(tn);
482 return NULL;
484 /* One child */
485 if (tn->empty_children == tnode_child_length(tn) - 1)
486 for (i = 0; i < tnode_child_length(tn); i++) {
487 struct node *n;
489 n = tn->child[i];
490 if (!n)
491 continue;
493 /* compress one level */
494 node_set_parent(n, NULL);
495 tnode_free(tn);
496 return n;
499 * Double the node as long as the resulting node's ratio of
500 * non-empty children is above the threshold.
504 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
505 * the Helsinki University of Technology and Matti Tikkanen of Nokia
506 * Telecommunications, page 6:
507 * "A node is doubled if the ratio of non-empty children to all
508 * children in the *doubled* node is at least 'high'."
510 * 'high' in this instance is the variable 'inflate_threshold'. It
511 * is expressed as a percentage, so we multiply it with
512 * tnode_child_length() and instead of multiplying by 2 (since the
513 * child array will be doubled by inflate()) and multiplying
514 * the left-hand side by 100 (to handle the percentage thing) we
515 * multiply the left-hand side by 50.
517 * The left-hand side may look a bit weird: tnode_child_length(tn)
518 * - tn->empty_children is of course the number of non-null children
519 * in the current node. tn->full_children is the number of "full"
520 * children, that is non-null tnodes with a skip value of 0.
521 * All of those will be doubled in the resulting inflated tnode, so
522 * we just count them one extra time here.
524 * A clearer way to write this would be:
526 * to_be_doubled = tn->full_children;
527 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
528 * tn->full_children;
530 * new_child_length = tnode_child_length(tn) * 2;
532 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
533 * new_child_length;
534 * if (new_fill_factor >= inflate_threshold)
536 * ...and so on, though it would mess up the while () loop.
538 * anyway,
539 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
540 * inflate_threshold
542 * avoid a division:
543 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
544 * inflate_threshold * new_child_length
546 * expand not_to_be_doubled and to_be_doubled, and shorten:
547 * 100 * (tnode_child_length(tn) - tn->empty_children +
548 * tn->full_children) >= inflate_threshold * new_child_length
550 * expand new_child_length:
551 * 100 * (tnode_child_length(tn) - tn->empty_children +
552 * tn->full_children) >=
553 * inflate_threshold * tnode_child_length(tn) * 2
555 * shorten again:
556 * 50 * (tn->full_children + tnode_child_length(tn) -
557 * tn->empty_children) >= inflate_threshold *
558 * tnode_child_length(tn)
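/*
 * Worked example (illustration only, assumed numbers): for a tnode with
 * bits = 4, tnode_child_length(tn) is 16.  If empty_children = 4 and
 * full_children = 6, the left-hand side is 50 * (6 + 16 - 4) = 900 while,
 * with the default inflate_threshold of 50, the right-hand side is
 * 50 * 16 = 800, so the node is inflated.  Equivalently, the fill factor
 * of the doubled node would be 100 * (6 + 2*6) / 32 = 56%, which is above
 * the 50% threshold.
 */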
562 check_tnode(tn);
564 /* Keep root node larger */
566 if (!tn->parent)
567 inflate_threshold_use = inflate_threshold_root;
568 else
569 inflate_threshold_use = inflate_threshold;
571 err = 0;
572 max_resize = 10;
573 while ((tn->full_children > 0 && max_resize-- &&
574 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
575 inflate_threshold_use * tnode_child_length(tn))) {
577 old_tn = tn;
578 tn = inflate(t, tn);
579 if (IS_ERR(tn)) {
580 tn = old_tn;
581 #ifdef CONFIG_IP_FIB_TRIE_STATS
582 t->stats.resize_node_skipped++;
583 #endif
584 break;
588 if (max_resize < 0) {
589 if (!tn->parent)
590 printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
591 inflate_threshold_root, tn->bits);
592 else
593 printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
594 inflate_threshold, tn->bits);
597 check_tnode(tn);
600 * Halve as long as the number of empty children in this
601 * node is above the threshold.
605 /* Keep root node larger */
607 if (!tn->parent)
608 halve_threshold_use = halve_threshold_root;
609 else
610 halve_threshold_use = halve_threshold;
612 err = 0;
613 max_resize = 10;
614 while (tn->bits > 1 && max_resize-- &&
615 100 * (tnode_child_length(tn) - tn->empty_children) <
616 halve_threshold_use * tnode_child_length(tn)) {
618 old_tn = tn;
619 tn = halve(t, tn);
620 if (IS_ERR(tn)) {
621 tn = old_tn;
622 #ifdef CONFIG_IP_FIB_TRIE_STATS
623 t->stats.resize_node_skipped++;
624 #endif
625 break;
629 if (max_resize < 0) {
630 if (!tn->parent)
631 printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
632 halve_threshold_root, tn->bits);
633 else
634 printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
635 halve_threshold, tn->bits);
638 /* Only one child remains */
639 if (tn->empty_children == tnode_child_length(tn) - 1)
640 for (i = 0; i < tnode_child_length(tn); i++) {
641 struct node *n;
643 n = tn->child[i];
644 if (!n)
645 continue;
647 /* compress one level */
649 node_set_parent(n, NULL);
650 tnode_free(tn);
651 return n;
654 return (struct node *) tn;
657 static struct tnode *inflate(struct trie *t, struct tnode *tn)
659 struct tnode *inode;
660 struct tnode *oldtnode = tn;
661 int olen = tnode_child_length(tn);
662 int i;
664 pr_debug("In inflate\n");
666 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
668 if (!tn)
669 return ERR_PTR(-ENOMEM);
672 * Preallocate and store tnodes before the actual work so we
673 * don't get into an inconsistent state if memory allocation
674 * fails. In case of failure we return the old node and the
675 * inflate of the tnode is ignored.
678 for (i = 0; i < olen; i++) {
679 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
681 if (inode &&
682 IS_TNODE(inode) &&
683 inode->pos == oldtnode->pos + oldtnode->bits &&
684 inode->bits > 1) {
685 struct tnode *left, *right;
686 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
688 left = tnode_new(inode->key&(~m), inode->pos + 1,
689 inode->bits - 1);
690 if (!left)
691 goto nomem;
693 right = tnode_new(inode->key|m, inode->pos + 1,
694 inode->bits - 1);
696 if (!right) {
697 tnode_free(left);
698 goto nomem;
701 put_child(t, tn, 2*i, (struct node *) left);
702 put_child(t, tn, 2*i+1, (struct node *) right);
706 for (i = 0; i < olen; i++) {
707 struct node *node = tnode_get_child(oldtnode, i);
708 struct tnode *left, *right;
709 int size, j;
711 /* An empty child */
712 if (node == NULL)
713 continue;
715 /* A leaf or an internal node with skipped bits */
717 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
718 tn->pos + tn->bits - 1) {
719 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
720 1) == 0)
721 put_child(t, tn, 2*i, node);
722 else
723 put_child(t, tn, 2*i+1, node);
724 continue;
727 /* An internal node with two children */
728 inode = (struct tnode *) node;
730 if (inode->bits == 1) {
731 put_child(t, tn, 2*i, inode->child[0]);
732 put_child(t, tn, 2*i+1, inode->child[1]);
734 tnode_free(inode);
735 continue;
738 /* An internal node with more than two children */
740 /* We will replace this node 'inode' with two new
741 * ones, 'left' and 'right', each with half of the
742 * original children. The two new nodes will have
743 * a position one bit further down the key and this
744 * means that the "significant" part of their keys
745 * (see the discussion near the top of this file)
746 * will differ by one bit, which will be "0" in
747 * left's key and "1" in right's key. Since we are
748 * moving the key position by one step, the bit that
749 * we are moving away from - the bit at position
750 * (inode->pos) - is the one that will differ between
751 * left and right. So... we synthesize that bit in the
752 * two new keys.
753 * The mask 'm' below will be a single "one" bit at
754 * the position (inode->pos)
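/* Illustration (assumed value): with KEYLENGTH = 32 and inode->pos = 12,
 * m = ~0U << 31 >> 12 = 0x00080000, i.e. a single one bit at key
 * position 12 counting from the most significant bit.
 */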
757 /* Use the old key, but set the new significant
758 * bit to zero.
761 left = (struct tnode *) tnode_get_child(tn, 2*i);
762 put_child(t, tn, 2*i, NULL);
764 BUG_ON(!left);
766 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
767 put_child(t, tn, 2*i+1, NULL);
769 BUG_ON(!right);
771 size = tnode_child_length(left);
772 for (j = 0; j < size; j++) {
773 put_child(t, left, j, inode->child[j]);
774 put_child(t, right, j, inode->child[j + size]);
776 put_child(t, tn, 2*i, resize(t, left));
777 put_child(t, tn, 2*i+1, resize(t, right));
779 tnode_free(inode);
781 tnode_free(oldtnode);
782 return tn;
783 nomem:
785 int size = tnode_child_length(tn);
786 int j;
788 for (j = 0; j < size; j++)
789 if (tn->child[j])
790 tnode_free((struct tnode *)tn->child[j]);
792 tnode_free(tn);
794 return ERR_PTR(-ENOMEM);
798 static struct tnode *halve(struct trie *t, struct tnode *tn)
800 struct tnode *oldtnode = tn;
801 struct node *left, *right;
802 int i;
803 int olen = tnode_child_length(tn);
805 pr_debug("In halve\n");
807 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
809 if (!tn)
810 return ERR_PTR(-ENOMEM);
813 * Preallocate and store tnodes before the actual work so we
814 * don't get into an inconsistent state if memory allocation
815 * fails. In case of failure we return the old node and the
816 * halve of the tnode is ignored.
819 for (i = 0; i < olen; i += 2) {
820 left = tnode_get_child(oldtnode, i);
821 right = tnode_get_child(oldtnode, i+1);
823 /* Two nonempty children */
824 if (left && right) {
825 struct tnode *newn;
827 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
829 if (!newn)
830 goto nomem;
832 put_child(t, tn, i/2, (struct node *)newn);
837 for (i = 0; i < olen; i += 2) {
838 struct tnode *newBinNode;
840 left = tnode_get_child(oldtnode, i);
841 right = tnode_get_child(oldtnode, i+1);
843 /* At least one of the children is empty */
844 if (left == NULL) {
845 if (right == NULL) /* Both are empty */
846 continue;
847 put_child(t, tn, i/2, right);
848 continue;
851 if (right == NULL) {
852 put_child(t, tn, i/2, left);
853 continue;
856 /* Two nonempty children */
857 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
858 put_child(t, tn, i/2, NULL);
859 put_child(t, newBinNode, 0, left);
860 put_child(t, newBinNode, 1, right);
861 put_child(t, tn, i/2, resize(t, newBinNode));
863 tnode_free(oldtnode);
864 return tn;
865 nomem:
867 int size = tnode_child_length(tn);
868 int j;
870 for (j = 0; j < size; j++)
871 if (tn->child[j])
872 tnode_free((struct tnode *)tn->child[j]);
874 tnode_free(tn);
876 return ERR_PTR(-ENOMEM);
880 static void trie_init(struct trie *t)
882 if (!t)
883 return;
885 t->size = 0;
886 rcu_assign_pointer(t->trie, NULL);
887 t->revision = 0;
888 #ifdef CONFIG_IP_FIB_TRIE_STATS
889 memset(&t->stats, 0, sizeof(struct trie_use_stats));
890 #endif
893 /* The read side must hold rcu_read_lock; currently this is used by the
894 dump routines via get_fa_head and dump */
896 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
898 struct hlist_head *head = &l->list;
899 struct hlist_node *node;
900 struct leaf_info *li;
902 hlist_for_each_entry_rcu(li, node, head, hlist)
903 if (li->plen == plen)
904 return li;
906 return NULL;
909 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
911 struct leaf_info *li = find_leaf_info(l, plen);
913 if (!li)
914 return NULL;
916 return &li->falh;
919 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
921 struct leaf_info *li = NULL, *last = NULL;
922 struct hlist_node *node;
924 if (hlist_empty(head)) {
925 hlist_add_head_rcu(&new->hlist, head);
926 } else {
927 hlist_for_each_entry(li, node, head, hlist) {
928 if (new->plen > li->plen)
929 break;
931 last = li;
933 if (last)
934 hlist_add_after_rcu(&last->hlist, &new->hlist);
935 else
936 hlist_add_before_rcu(&new->hlist, &li->hlist);
940 /* rcu_read_lock must be held by the caller on the read side */
942 static struct leaf *
943 fib_find_node(struct trie *t, u32 key)
945 int pos;
946 struct tnode *tn;
947 struct node *n;
949 pos = 0;
950 n = rcu_dereference(t->trie);
952 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
953 tn = (struct tnode *) n;
955 check_tnode(tn);
957 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
958 pos = tn->pos + tn->bits;
959 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
960 } else
961 break;
963 /* Case we have found a leaf. Compare prefixes */
965 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
966 return (struct leaf *)n;
968 return NULL;
971 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
973 int wasfull;
974 t_key cindex, key = tn->key;
975 struct tnode *tp;
977 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
978 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
979 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
980 tn = (struct tnode *) resize (t, (struct tnode *)tn);
981 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
983 tp = node_parent((struct node *) tn);
984 if (!tp)
985 break;
986 tn = tp;
989 /* Handle last (top) tnode */
990 if (IS_TNODE(tn))
991 tn = (struct tnode*) resize(t, (struct tnode *)tn);
993 return (struct node*) tn;
996 /* only used from updater-side */
998 static struct list_head *
999 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
1001 int pos, newpos;
1002 struct tnode *tp = NULL, *tn = NULL;
1003 struct node *n;
1004 struct leaf *l;
1005 int missbit;
1006 struct list_head *fa_head = NULL;
1007 struct leaf_info *li;
1008 t_key cindex;
1010 pos = 0;
1011 n = t->trie;
1013 /* If we point to NULL, stop. Either the tree is empty and we should
1014 * just put a new leaf in it, or we have reached an empty child slot,
1015 * and we should just put our new leaf in that.
1016 * If we point to a T_TNODE, check if it matches our key. Note that
1017 * a T_TNODE might be skipping any number of bits - its 'pos' need
1018 * not be the parent's 'pos'+'bits'!
1020 * If it does match the current key, get pos/bits from it, extract
1021 * the index from our key, push the T_TNODE and walk the tree.
1023 * If it doesn't, we have to replace it with a new T_TNODE.
1025 * If we point to a T_LEAF, it might or might not have the same key
1026 * as we do. If it does, just change the value, update the T_LEAF's
1027 * value, and return it.
1028 * If it doesn't, we need to replace it with a T_TNODE.
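/* Worked example (illustration): inserting the first prefix into an
 * empty trie takes Case 3 below with n == NULL and tp == NULL: a root
 * tnode with pos = 0 and bits = 1 is created, the new leaf is hung off
 * the child slot selected by the top bit of the key, and the other
 * slot stays NULL.
 */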
1031 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1032 tn = (struct tnode *) n;
1034 check_tnode(tn);
1036 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1037 tp = tn;
1038 pos = tn->pos + tn->bits;
1039 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1041 BUG_ON(n && node_parent(n) != tn);
1042 } else
1043 break;
1047 * n ----> NULL, LEAF or TNODE
1049 * tp is n's parent ----> NULL or TNODE
1052 BUG_ON(tp && IS_LEAF(tp));
1054 /* Case 1: n is a leaf. Compare prefixes */
1056 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1057 struct leaf *l = (struct leaf *) n;
1059 li = leaf_info_new(plen);
1061 if (!li) {
1062 *err = -ENOMEM;
1063 goto err;
1066 fa_head = &li->falh;
1067 insert_leaf_info(&l->list, li);
1068 goto done;
1070 t->size++;
1071 l = leaf_new();
1073 if (!l) {
1074 *err = -ENOMEM;
1075 goto err;
1078 l->key = key;
1079 li = leaf_info_new(plen);
1081 if (!li) {
1082 tnode_free((struct tnode *) l);
1083 *err = -ENOMEM;
1084 goto err;
1087 fa_head = &li->falh;
1088 insert_leaf_info(&l->list, li);
1090 if (t->trie && n == NULL) {
1091 /* Case 2: n is NULL, and will just insert a new leaf */
1093 node_set_parent((struct node *)l, tp);
1095 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1096 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1097 } else {
1098 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1100 * Add a new tnode here
1101 * the first tnode needs some special handling
1104 if (tp)
1105 pos = tp->pos+tp->bits;
1106 else
1107 pos = 0;
1109 if (n) {
1110 newpos = tkey_mismatch(key, pos, n->key);
1111 tn = tnode_new(n->key, newpos, 1);
1112 } else {
1113 newpos = 0;
1114 tn = tnode_new(key, newpos, 1); /* First tnode */
1117 if (!tn) {
1118 free_leaf_info(li);
1119 tnode_free((struct tnode *) l);
1120 *err = -ENOMEM;
1121 goto err;
1124 node_set_parent((struct node *)tn, tp);
1126 missbit = tkey_extract_bits(key, newpos, 1);
1127 put_child(t, tn, missbit, (struct node *)l);
1128 put_child(t, tn, 1-missbit, n);
1130 if (tp) {
1131 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1132 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1133 } else {
1134 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1135 tp = tn;
1139 if (tp && tp->pos + tp->bits > 32)
1140 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1141 tp, tp->pos, tp->bits, key, plen);
1143 /* Rebalance the trie */
1145 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1146 done:
1147 t->revision++;
1148 err:
1149 return fa_head;
1153 * Caller must hold RTNL.
1155 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1157 struct trie *t = (struct trie *) tb->tb_data;
1158 struct fib_alias *fa, *new_fa;
1159 struct list_head *fa_head = NULL;
1160 struct fib_info *fi;
1161 int plen = cfg->fc_dst_len;
1162 u8 tos = cfg->fc_tos;
1163 u32 key, mask;
1164 int err;
1165 struct leaf *l;
1167 if (plen > 32)
1168 return -EINVAL;
1170 key = ntohl(cfg->fc_dst);
1172 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1174 mask = ntohl(inet_make_mask(plen));
1176 if (key & ~mask)
1177 return -EINVAL;
1179 key = key & mask;
1181 fi = fib_create_info(cfg);
1182 if (IS_ERR(fi)) {
1183 err = PTR_ERR(fi);
1184 goto err;
1187 l = fib_find_node(t, key);
1188 fa = NULL;
1190 if (l) {
1191 fa_head = get_fa_head(l, plen);
1192 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1195 /* Now fa, if non-NULL, points to the first fib alias
1196 * with the same keys [prefix,tos,priority], if such key already
1197 * exists or to the node before which we will insert new one.
1199 * If fa is NULL, we will need to allocate a new one and
1200 * insert to the head of f.
1202 * If f is NULL, no fib node matched the destination key
1203 * and we need to allocate a new one of those as well.
1206 if (fa && fa->fa_tos == tos &&
1207 fa->fa_info->fib_priority == fi->fib_priority) {
1208 struct fib_alias *fa_first, *fa_match;
1210 err = -EEXIST;
1211 if (cfg->fc_nlflags & NLM_F_EXCL)
1212 goto out;
1214 /* We have 2 goals:
1215 * 1. Find exact match for type, scope, fib_info to avoid
1216 * duplicate routes
1217 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1219 fa_match = NULL;
1220 fa_first = fa;
1221 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1222 list_for_each_entry_continue(fa, fa_head, fa_list) {
1223 if (fa->fa_tos != tos)
1224 break;
1225 if (fa->fa_info->fib_priority != fi->fib_priority)
1226 break;
1227 if (fa->fa_type == cfg->fc_type &&
1228 fa->fa_scope == cfg->fc_scope &&
1229 fa->fa_info == fi) {
1230 fa_match = fa;
1231 break;
1235 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1236 struct fib_info *fi_drop;
1237 u8 state;
1239 fa = fa_first;
1240 if (fa_match) {
1241 if (fa == fa_match)
1242 err = 0;
1243 goto out;
1245 err = -ENOBUFS;
1246 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1247 if (new_fa == NULL)
1248 goto out;
1250 fi_drop = fa->fa_info;
1251 new_fa->fa_tos = fa->fa_tos;
1252 new_fa->fa_info = fi;
1253 new_fa->fa_type = cfg->fc_type;
1254 new_fa->fa_scope = cfg->fc_scope;
1255 state = fa->fa_state;
1256 new_fa->fa_state = state & ~FA_S_ACCESSED;
1258 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1259 alias_free_mem_rcu(fa);
1261 fib_release_info(fi_drop);
1262 if (state & FA_S_ACCESSED)
1263 rt_cache_flush(-1);
1264 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1265 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1267 goto succeeded;
1269 /* Error if we find a perfect match which
1270 * uses the same scope, type, and nexthop
1271 * information.
1273 if (fa_match)
1274 goto out;
1276 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1277 fa = fa_first;
1279 err = -ENOENT;
1280 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1281 goto out;
1283 err = -ENOBUFS;
1284 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1285 if (new_fa == NULL)
1286 goto out;
1288 new_fa->fa_info = fi;
1289 new_fa->fa_tos = tos;
1290 new_fa->fa_type = cfg->fc_type;
1291 new_fa->fa_scope = cfg->fc_scope;
1292 new_fa->fa_state = 0;
1294 * Insert new entry to the list.
1297 if (!fa_head) {
1298 err = 0;
1299 fa_head = fib_insert_node(t, &err, key, plen);
1300 if (err)
1301 goto out_free_new_fa;
1304 list_add_tail_rcu(&new_fa->fa_list,
1305 (fa ? &fa->fa_list : fa_head));
1307 rt_cache_flush(-1);
1308 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1309 &cfg->fc_nlinfo, 0);
1310 succeeded:
1311 return 0;
1313 out_free_new_fa:
1314 kmem_cache_free(fn_alias_kmem, new_fa);
1315 out:
1316 fib_release_info(fi);
1317 err:
1318 return err;
1322 /* should be called with rcu_read_lock */
1323 static inline int check_leaf(struct trie *t, struct leaf *l,
1324 t_key key, int *plen, const struct flowi *flp,
1325 struct fib_result *res)
1327 int err, i;
1328 __be32 mask;
1329 struct leaf_info *li;
1330 struct hlist_head *hhead = &l->list;
1331 struct hlist_node *node;
1333 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1334 i = li->plen;
1335 mask = inet_make_mask(i);
1336 if (l->key != (key & ntohl(mask)))
1337 continue;
1339 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1340 *plen = i;
1341 #ifdef CONFIG_IP_FIB_TRIE_STATS
1342 t->stats.semantic_match_passed++;
1343 #endif
1344 return err;
1346 #ifdef CONFIG_IP_FIB_TRIE_STATS
1347 t->stats.semantic_match_miss++;
1348 #endif
1350 return 1;
1353 static int
1354 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1356 struct trie *t = (struct trie *) tb->tb_data;
1357 int plen, ret = 0;
1358 struct node *n;
1359 struct tnode *pn;
1360 int pos, bits;
1361 t_key key = ntohl(flp->fl4_dst);
1362 int chopped_off;
1363 t_key cindex = 0;
1364 int current_prefix_length = KEYLENGTH;
1365 struct tnode *cn;
1366 t_key node_prefix, key_prefix, pref_mismatch;
1367 int mp;
1369 rcu_read_lock();
1371 n = rcu_dereference(t->trie);
1372 if (!n)
1373 goto failed;
1375 #ifdef CONFIG_IP_FIB_TRIE_STATS
1376 t->stats.gets++;
1377 #endif
1379 /* Just a leaf? */
1380 if (IS_LEAF(n)) {
1381 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1382 goto found;
1383 goto failed;
1385 pn = (struct tnode *) n;
1386 chopped_off = 0;
1388 while (pn) {
1389 pos = pn->pos;
1390 bits = pn->bits;
1392 if (!chopped_off)
1393 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1394 pos, bits);
1396 n = tnode_get_child(pn, cindex);
1398 if (n == NULL) {
1399 #ifdef CONFIG_IP_FIB_TRIE_STATS
1400 t->stats.null_node_hit++;
1401 #endif
1402 goto backtrace;
1405 if (IS_LEAF(n)) {
1406 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1407 goto found;
1408 else
1409 goto backtrace;
1412 #define HL_OPTIMIZE
1413 #ifdef HL_OPTIMIZE
1414 cn = (struct tnode *)n;
1417 * It's a tnode, and we can do some extra checks here if we
1418 * like, to avoid descending into a dead-end branch.
1419 * This tnode is in the parent's child array at index
1420 * key[p_pos..p_pos+p_bits] but potentially with some bits
1421 * chopped off, so in reality the index may be just a
1422 * subprefix, padded with zero at the end.
1423 * We can also take a look at any skipped bits in this
1424 * tnode - everything up to p_pos is supposed to be ok,
1425 * and the non-chopped bits of the index (see previous
1426 * paragraph) are also guaranteed ok, but the rest is
1427 * considered unknown.
1429 * The skipped bits are key[pos+bits..cn->pos].
1432 /* If current_prefix_length < pos+bits, we are already doing
1433 * actual prefix matching, which means everything from
1434 * pos+(bits-chopped_off) onward must be zero along some
1435 * branch of this subtree - otherwise there is *no* valid
1436 * prefix present. Here we can only check the skipped
1437 * bits. Remember, since we have already indexed into the
1438 * parent's child array, we know that the bits we chopped off
1439 * *are* zero.
1442 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1444 if (current_prefix_length < pos+bits) {
1445 if (tkey_extract_bits(cn->key, current_prefix_length,
1446 cn->pos - current_prefix_length) != 0 ||
1447 !(cn->child[0]))
1448 goto backtrace;
1452 * If chopped_off=0, the index is fully validated and we
1453 * only need to look at the skipped bits for this, the new,
1454 * tnode. What we actually want to do is to find out if
1455 * these skipped bits match our key perfectly, or if we will
1456 * have to count on finding a matching prefix further down,
1457 * because if we do, we would like to have some way of
1458 * verifying the existence of such a prefix at this point.
1461 /* The only thing we can do at this point is to verify that
1462 * any such matching prefix can indeed be a prefix to our
1463 * key, and if the bits in the node we are inspecting that
1464 * do not match our key are not ZERO, this cannot be true.
1465 * Thus, find out where there is a mismatch (before cn->pos)
1466 * and verify that all the mismatching bits are zero in the
1467 * new tnode's key.
1470 /* Note: We aren't very concerned about the part of the key
1471 * that precedes pn->pos+pn->bits, since those bits have already been
1472 * checked. The bits after cn->pos aren't checked since these are
1473 * by definition "unknown" at this point. Thus, what we want to
1474 * see is if we are about to enter the "prefix matching" state,
1475 * and in that case verify that the skipped bits that will prevail
1476 * throughout this subtree are zero, as they have to be if we are
1477 * to find a matching prefix.
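/* Worked example (assumed values): suppose cn->pos = 16,
 * cn->key = 0x0A800000 (10.128.0.0) and the search key is
 * 0x0A000001 (10.0.0.1).  Then node_prefix = 0x0A800000,
 * key_prefix = 0x0A000000 and pref_mismatch = 0x00800000, so the
 * first mismatch is at bit 8 (mp = 8).  Since bits 8..15 of cn->key
 * are 0x80 != 0, no prefix matching the search key can exist below
 * this tnode and the lookup backtracks.
 */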
1480 node_prefix = mask_pfx(cn->key, cn->pos);
1481 key_prefix = mask_pfx(key, cn->pos);
1482 pref_mismatch = key_prefix^node_prefix;
1483 mp = 0;
1485 /* In short: If the skipped bits in this node do not match the search
1486 * key, enter the "prefix matching" state directly.
1488 if (pref_mismatch) {
1489 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1490 mp++;
1491 pref_mismatch = pref_mismatch <<1;
1493 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1495 if (key_prefix != 0)
1496 goto backtrace;
1498 if (current_prefix_length >= cn->pos)
1499 current_prefix_length = mp;
1501 #endif
1502 pn = (struct tnode *)n; /* Descend */
1503 chopped_off = 0;
1504 continue;
1506 backtrace:
1507 chopped_off++;
1509 /* As zero bits don't change the child key (cindex), skip over them */
1510 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1511 chopped_off++;
1513 /* Decrease current_... with bits chopped off */
1514 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1515 current_prefix_length = pn->pos + pn->bits - chopped_off;
1518 * Either we do the actual chop-off accordingly, or, if we have
1519 * chopped off all bits in this tnode, we walk up to our parent.
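/* Worked example (illustration): with pn->bits = 3 and cindex = 6
 * (binary 110), the first backtrack skips the trailing zero bit, so
 * chopped_off becomes 2 and cindex is reduced to 4 (binary 100); the
 * next backtrack clears the remaining bit (cindex = 0), and a third
 * one walks up to the parent tnode.
 */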
1522 if (chopped_off <= pn->bits) {
1523 cindex &= ~(1 << (chopped_off-1));
1524 } else {
1525 struct tnode *parent = node_parent((struct node *) pn);
1526 if (!parent)
1527 goto failed;
1529 /* Get Child's index */
1530 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1531 pn = parent;
1532 chopped_off = 0;
1534 #ifdef CONFIG_IP_FIB_TRIE_STATS
1535 t->stats.backtrack++;
1536 #endif
1537 goto backtrace;
1540 failed:
1541 ret = 1;
1542 found:
1543 rcu_read_unlock();
1544 return ret;
1547 /* only called from updater side */
1548 static int trie_leaf_remove(struct trie *t, t_key key)
1550 t_key cindex;
1551 struct tnode *tp = NULL;
1552 struct node *n = t->trie;
1553 struct leaf *l;
1555 pr_debug("entering trie_leaf_remove(%p)\n", n);
1557 /* Note that in the case of skipped bits, those bits are *not* checked!
1558 * When we finish this, we will have NULL or a T_LEAF, and the
1559 * T_LEAF may or may not match our key.
1562 while (n != NULL && IS_TNODE(n)) {
1563 struct tnode *tn = (struct tnode *) n;
1564 check_tnode(tn);
1565 n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits));
1567 BUG_ON(n && node_parent(n) != tn);
1569 l = (struct leaf *) n;
1571 if (!n || !tkey_equals(l->key, key))
1572 return 0;
1575 * Key found.
1576 * Remove the leaf and rebalance the tree
1579 t->revision++;
1580 t->size--;
1582 tp = node_parent(n);
1583 tnode_free((struct tnode *) n);
1585 if (tp) {
1586 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1587 put_child(t, (struct tnode *)tp, cindex, NULL);
1588 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1589 } else
1590 rcu_assign_pointer(t->trie, NULL);
1592 return 1;
1596 * Caller must hold RTNL.
1598 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1600 struct trie *t = (struct trie *) tb->tb_data;
1601 u32 key, mask;
1602 int plen = cfg->fc_dst_len;
1603 u8 tos = cfg->fc_tos;
1604 struct fib_alias *fa, *fa_to_delete;
1605 struct list_head *fa_head;
1606 struct leaf *l;
1607 struct leaf_info *li;
1609 if (plen > 32)
1610 return -EINVAL;
1612 key = ntohl(cfg->fc_dst);
1613 mask = ntohl(inet_make_mask(plen));
1615 if (key & ~mask)
1616 return -EINVAL;
1618 key = key & mask;
1619 l = fib_find_node(t, key);
1621 if (!l)
1622 return -ESRCH;
1624 fa_head = get_fa_head(l, plen);
1625 fa = fib_find_alias(fa_head, tos, 0);
1627 if (!fa)
1628 return -ESRCH;
1630 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1632 fa_to_delete = NULL;
1633 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1634 list_for_each_entry_continue(fa, fa_head, fa_list) {
1635 struct fib_info *fi = fa->fa_info;
1637 if (fa->fa_tos != tos)
1638 break;
1640 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1641 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1642 fa->fa_scope == cfg->fc_scope) &&
1643 (!cfg->fc_protocol ||
1644 fi->fib_protocol == cfg->fc_protocol) &&
1645 fib_nh_match(cfg, fi) == 0) {
1646 fa_to_delete = fa;
1647 break;
1651 if (!fa_to_delete)
1652 return -ESRCH;
1654 fa = fa_to_delete;
1655 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1656 &cfg->fc_nlinfo, 0);
1658 l = fib_find_node(t, key);
1659 li = find_leaf_info(l, plen);
1661 list_del_rcu(&fa->fa_list);
1663 if (list_empty(fa_head)) {
1664 hlist_del_rcu(&li->hlist);
1665 free_leaf_info(li);
1668 if (hlist_empty(&l->list))
1669 trie_leaf_remove(t, key);
1671 if (fa->fa_state & FA_S_ACCESSED)
1672 rt_cache_flush(-1);
1674 fib_release_info(fa->fa_info);
1675 alias_free_mem_rcu(fa);
1676 return 0;
1679 static int trie_flush_list(struct trie *t, struct list_head *head)
1681 struct fib_alias *fa, *fa_node;
1682 int found = 0;
1684 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1685 struct fib_info *fi = fa->fa_info;
1687 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1688 list_del_rcu(&fa->fa_list);
1689 fib_release_info(fa->fa_info);
1690 alias_free_mem_rcu(fa);
1691 found++;
1694 return found;
1697 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1699 int found = 0;
1700 struct hlist_head *lih = &l->list;
1701 struct hlist_node *node, *tmp;
1702 struct leaf_info *li = NULL;
1704 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1705 found += trie_flush_list(t, &li->falh);
1707 if (list_empty(&li->falh)) {
1708 hlist_del_rcu(&li->hlist);
1709 free_leaf_info(li);
1712 return found;
1715 /* rcu_read_lock must be held by the caller on the read side */
1717 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1719 struct node *c = (struct node *) thisleaf;
1720 struct tnode *p;
1721 int idx;
1722 struct node *trie = rcu_dereference(t->trie);
1724 if (c == NULL) {
1725 if (trie == NULL)
1726 return NULL;
1728 if (IS_LEAF(trie)) /* trie w. just a leaf */
1729 return (struct leaf *) trie;
1731 p = (struct tnode*) trie; /* Start */
1732 } else
1733 p = node_parent(c);
1735 while (p) {
1736 int pos, last;
1738 /* Find the next child of the parent */
1739 if (c)
1740 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1741 else
1742 pos = 0;
1744 last = 1 << p->bits;
1745 for (idx = pos; idx < last ; idx++) {
1746 c = rcu_dereference(p->child[idx]);
1748 if (!c)
1749 continue;
1751 /* Descend if tnode */
1752 while (IS_TNODE(c)) {
1753 p = (struct tnode *) c;
1754 idx = 0;
1756 /* Rightmost non-NULL branch */
1757 if (p && IS_TNODE(p))
1758 while (!(c = rcu_dereference(p->child[idx]))
1759 && idx < (1<<p->bits)) idx++;
1761 /* Done with this tnode? */
1762 if (idx >= (1 << p->bits) || !c)
1763 goto up;
1765 return (struct leaf *) c;
1768 /* No more children, go up one step */
1769 c = (struct node *) p;
1770 p = node_parent(c);
1772 return NULL; /* Ready. Root of trie */
1776 * Caller must hold RTNL.
1778 static int fn_trie_flush(struct fib_table *tb)
1780 struct trie *t = (struct trie *) tb->tb_data;
1781 struct leaf *ll = NULL, *l = NULL;
1782 int found = 0, h;
1784 t->revision++;
1786 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1787 found += trie_flush_leaf(t, l);
1789 if (ll && hlist_empty(&ll->list))
1790 trie_leaf_remove(t, ll->key);
1791 ll = l;
1794 if (ll && hlist_empty(&ll->list))
1795 trie_leaf_remove(t, ll->key);
1797 pr_debug("trie_flush found=%d\n", found);
1798 return found;
1801 static int trie_last_dflt = -1;
1803 static void
1804 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1806 struct trie *t = (struct trie *) tb->tb_data;
1807 int order, last_idx;
1808 struct fib_info *fi = NULL;
1809 struct fib_info *last_resort;
1810 struct fib_alias *fa = NULL;
1811 struct list_head *fa_head;
1812 struct leaf *l;
1814 last_idx = -1;
1815 last_resort = NULL;
1816 order = -1;
1818 rcu_read_lock();
1820 l = fib_find_node(t, 0);
1821 if (!l)
1822 goto out;
1824 fa_head = get_fa_head(l, 0);
1825 if (!fa_head)
1826 goto out;
1828 if (list_empty(fa_head))
1829 goto out;
1831 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1832 struct fib_info *next_fi = fa->fa_info;
1834 if (fa->fa_scope != res->scope ||
1835 fa->fa_type != RTN_UNICAST)
1836 continue;
1838 if (next_fi->fib_priority > res->fi->fib_priority)
1839 break;
1840 if (!next_fi->fib_nh[0].nh_gw ||
1841 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1842 continue;
1843 fa->fa_state |= FA_S_ACCESSED;
1845 if (fi == NULL) {
1846 if (next_fi != res->fi)
1847 break;
1848 } else if (!fib_detect_death(fi, order, &last_resort,
1849 &last_idx, &trie_last_dflt)) {
1850 if (res->fi)
1851 fib_info_put(res->fi);
1852 res->fi = fi;
1853 atomic_inc(&fi->fib_clntref);
1854 trie_last_dflt = order;
1855 goto out;
1857 fi = next_fi;
1858 order++;
1860 if (order <= 0 || fi == NULL) {
1861 trie_last_dflt = -1;
1862 goto out;
1865 if (!fib_detect_death(fi, order, &last_resort, &last_idx, &trie_last_dflt)) {
1866 if (res->fi)
1867 fib_info_put(res->fi);
1868 res->fi = fi;
1869 atomic_inc(&fi->fib_clntref);
1870 trie_last_dflt = order;
1871 goto out;
1873 if (last_idx >= 0) {
1874 if (res->fi)
1875 fib_info_put(res->fi);
1876 res->fi = last_resort;
1877 if (last_resort)
1878 atomic_inc(&last_resort->fib_clntref);
1880 trie_last_dflt = last_idx;
1881 out:;
1882 rcu_read_unlock();
1885 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1886 struct sk_buff *skb, struct netlink_callback *cb)
1888 int i, s_i;
1889 struct fib_alias *fa;
1891 __be32 xkey = htonl(key);
1893 s_i = cb->args[4];
1894 i = 0;
1896 /* rcu_read_lock is held by the caller */
1898 list_for_each_entry_rcu(fa, fah, fa_list) {
1899 if (i < s_i) {
1900 i++;
1901 continue;
1903 BUG_ON(!fa->fa_info);
1905 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1906 cb->nlh->nlmsg_seq,
1907 RTM_NEWROUTE,
1908 tb->tb_id,
1909 fa->fa_type,
1910 fa->fa_scope,
1911 xkey,
1912 plen,
1913 fa->fa_tos,
1914 fa->fa_info, 0) < 0) {
1915 cb->args[4] = i;
1916 return -1;
1918 i++;
1920 cb->args[4] = i;
1921 return skb->len;
1924 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1925 struct netlink_callback *cb)
1927 int h, s_h;
1928 struct list_head *fa_head;
1929 struct leaf *l = NULL;
1931 s_h = cb->args[3];
1933 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1934 if (h < s_h)
1935 continue;
1936 if (h > s_h)
1937 memset(&cb->args[4], 0,
1938 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1940 fa_head = get_fa_head(l, plen);
1942 if (!fa_head)
1943 continue;
1945 if (list_empty(fa_head))
1946 continue;
1948 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1949 cb->args[3] = h;
1950 return -1;
1953 cb->args[3] = h;
1954 return skb->len;
1957 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1959 int m, s_m;
1960 struct trie *t = (struct trie *) tb->tb_data;
1962 s_m = cb->args[2];
1964 rcu_read_lock();
1965 for (m = 0; m <= 32; m++) {
1966 if (m < s_m)
1967 continue;
1968 if (m > s_m)
1969 memset(&cb->args[3], 0,
1970 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1972 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1973 cb->args[2] = m;
1974 goto out;
1977 rcu_read_unlock();
1978 cb->args[2] = m;
1979 return skb->len;
1980 out:
1981 rcu_read_unlock();
1982 return -1;
1985 /* Fix more generic FIB names for init later */
1987 #ifdef CONFIG_IP_MULTIPLE_TABLES
1988 struct fib_table * fib_hash_init(u32 id)
1989 #else
1990 struct fib_table * __init fib_hash_init(u32 id)
1991 #endif
1993 struct fib_table *tb;
1994 struct trie *t;
1996 if (fn_alias_kmem == NULL)
1997 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1998 sizeof(struct fib_alias),
1999 0, SLAB_HWCACHE_ALIGN,
2000 NULL);
2002 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
2003 GFP_KERNEL);
2004 if (tb == NULL)
2005 return NULL;
2007 tb->tb_id = id;
2008 tb->tb_lookup = fn_trie_lookup;
2009 tb->tb_insert = fn_trie_insert;
2010 tb->tb_delete = fn_trie_delete;
2011 tb->tb_flush = fn_trie_flush;
2012 tb->tb_select_default = fn_trie_select_default;
2013 tb->tb_dump = fn_trie_dump;
2014 memset(tb->tb_data, 0, sizeof(struct trie));
2016 t = (struct trie *) tb->tb_data;
2018 trie_init(t);
2020 if (id == RT_TABLE_LOCAL)
2021 trie_local = t;
2022 else if (id == RT_TABLE_MAIN)
2023 trie_main = t;
2025 if (id == RT_TABLE_LOCAL)
2026 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
2028 return tb;
2031 #ifdef CONFIG_PROC_FS
2032 /* Depth first Trie walk iterator */
2033 struct fib_trie_iter {
2034 struct tnode *tnode;
2035 struct trie *trie;
2036 unsigned index;
2037 unsigned depth;
2040 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2042 struct tnode *tn = iter->tnode;
2043 unsigned cindex = iter->index;
2044 struct tnode *p;
2046 /* A single entry routing table */
2047 if (!tn)
2048 return NULL;
2050 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2051 iter->tnode, iter->index, iter->depth);
2052 rescan:
2053 while (cindex < (1<<tn->bits)) {
2054 struct node *n = tnode_get_child(tn, cindex);
2056 if (n) {
2057 if (IS_LEAF(n)) {
2058 iter->tnode = tn;
2059 iter->index = cindex + 1;
2060 } else {
2061 /* push down one level */
2062 iter->tnode = (struct tnode *) n;
2063 iter->index = 0;
2064 ++iter->depth;
2066 return n;
2069 ++cindex;
2072 /* Current node exhausted, pop back up */
2073 p = node_parent((struct node *)tn);
2074 if (p) {
2075 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2076 tn = p;
2077 --iter->depth;
2078 goto rescan;
2081 /* got root? */
2082 return NULL;
2085 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2086 struct trie *t)
2088 struct node *n ;
2090 if (!t)
2091 return NULL;
2093 n = rcu_dereference(t->trie);
2095 if (!iter)
2096 return NULL;
2098 if (n) {
2099 if (IS_TNODE(n)) {
2100 iter->tnode = (struct tnode *) n;
2101 iter->trie = t;
2102 iter->index = 0;
2103 iter->depth = 1;
2104 } else {
2105 iter->tnode = NULL;
2106 iter->trie = t;
2107 iter->index = 0;
2108 iter->depth = 0;
2110 return n;
2112 return NULL;
2115 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2117 struct node *n;
2118 struct fib_trie_iter iter;
2120 memset(s, 0, sizeof(*s));
2122 rcu_read_lock();
2123 for (n = fib_trie_get_first(&iter, t); n;
2124 n = fib_trie_get_next(&iter)) {
2125 if (IS_LEAF(n)) {
2126 s->leaves++;
2127 s->totdepth += iter.depth;
2128 if (iter.depth > s->maxdepth)
2129 s->maxdepth = iter.depth;
2130 } else {
2131 const struct tnode *tn = (const struct tnode *) n;
2132 int i;
2134 s->tnodes++;
2135 if (tn->bits < MAX_STAT_DEPTH)
2136 s->nodesizes[tn->bits]++;
2138 for (i = 0; i < (1<<tn->bits); i++)
2139 if (!tn->child[i])
2140 s->nullpointers++;
2143 rcu_read_unlock();
2147 * This outputs /proc/net/fib_triestats
2149 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2151 unsigned i, max, pointers, bytes, avdepth;
2153 if (stat->leaves)
2154 avdepth = stat->totdepth*100 / stat->leaves;
2155 else
2156 avdepth = 0;
2158 seq_printf(seq, "\tAver depth: %d.%02d\n", avdepth / 100, avdepth % 100 );
2159 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2161 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2163 bytes = sizeof(struct leaf) * stat->leaves;
2164 seq_printf(seq, "\tInternal nodes: %d\n\t", stat->tnodes);
2165 bytes += sizeof(struct tnode) * stat->tnodes;
2167 max = MAX_STAT_DEPTH;
2168 while (max > 0 && stat->nodesizes[max-1] == 0)
2169 max--;
2171 pointers = 0;
2172 for (i = 1; i <= max; i++)
2173 if (stat->nodesizes[i] != 0) {
2174 seq_printf(seq, " %d: %d", i, stat->nodesizes[i]);
2175 pointers += (1<<i) * stat->nodesizes[i];
2177 seq_putc(seq, '\n');
2178 seq_printf(seq, "\tPointers: %d\n", pointers);
2180 bytes += sizeof(struct node *) * pointers;
2181 seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers);
2182 seq_printf(seq, "Total size: %d kB\n", (bytes + 1023) / 1024);
2184 #ifdef CONFIG_IP_FIB_TRIE_STATS
2185 seq_printf(seq, "Counters:\n---------\n");
2186 seq_printf(seq,"gets = %d\n", t->stats.gets);
2187 seq_printf(seq,"backtracks = %d\n", t->stats.backtrack);
2188 seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed);
2189 seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss);
2190 seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit);
2191 seq_printf(seq,"skipped node resize = %d\n", t->stats.resize_node_skipped);
2192 #ifdef CLEAR_STATS
2193 memset(&(t->stats), 0, sizeof(t->stats));
2194 #endif
2195 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2198 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2200 struct trie_stat *stat;
2202 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2203 if (!stat)
2204 return -ENOMEM;
2206 seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2207 sizeof(struct leaf), sizeof(struct tnode));
2209 if (trie_local) {
2210 seq_printf(seq, "Local:\n");
2211 trie_collect_stats(trie_local, stat);
2212 trie_show_stats(seq, stat);
2215 if (trie_main) {
2216 seq_printf(seq, "Main:\n");
2217 trie_collect_stats(trie_main, stat);
2218 trie_show_stats(seq, stat);
2220 kfree(stat);
2222 return 0;
2225 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2227 return single_open(file, fib_triestat_seq_show, NULL);
2230 static const struct file_operations fib_triestat_fops = {
2231 .owner = THIS_MODULE,
2232 .open = fib_triestat_seq_open,
2233 .read = seq_read,
2234 .llseek = seq_lseek,
2235 .release = single_release,
2238 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2239 loff_t pos)
2241 loff_t idx = 0;
2242 struct node *n;
2244 for (n = fib_trie_get_first(iter, trie_local);
2245 n; ++idx, n = fib_trie_get_next(iter)) {
2246 if (pos == idx)
2247 return n;
2250 for (n = fib_trie_get_first(iter, trie_main);
2251 n; ++idx, n = fib_trie_get_next(iter)) {
2252 if (pos == idx)
2253 return n;
2255 return NULL;
2258 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2260 rcu_read_lock();
2261 if (*pos == 0)
2262 return SEQ_START_TOKEN;
2263 return fib_trie_get_idx(seq->private, *pos - 1);
2266 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2268 struct fib_trie_iter *iter = seq->private;
2269 void *l = v;
2271 ++*pos;
2272 if (v == SEQ_START_TOKEN)
2273 return fib_trie_get_idx(iter, 0);
2275 v = fib_trie_get_next(iter);
2276 BUG_ON(v == l);
2277 if (v)
2278 return v;
2280 /* continue scan in next trie */
2281 if (iter->trie == trie_local)
2282 return fib_trie_get_first(iter, trie_main);
2284 return NULL;
2287 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2289 rcu_read_unlock();
2292 static void seq_indent(struct seq_file *seq, int n)
2294 while (n-- > 0) seq_puts(seq, " ");
2297 static inline const char *rtn_scope(enum rt_scope_t s)
2299 static char buf[32];
2301 switch (s) {
2302 case RT_SCOPE_UNIVERSE: return "universe";
2303 case RT_SCOPE_SITE: return "site";
2304 case RT_SCOPE_LINK: return "link";
2305 case RT_SCOPE_HOST: return "host";
2306 case RT_SCOPE_NOWHERE: return "nowhere";
2307 default:
2308 snprintf(buf, sizeof(buf), "scope=%d", s);
2309 return buf;
2313 static const char *rtn_type_names[__RTN_MAX] = {
2314 [RTN_UNSPEC] = "UNSPEC",
2315 [RTN_UNICAST] = "UNICAST",
2316 [RTN_LOCAL] = "LOCAL",
2317 [RTN_BROADCAST] = "BROADCAST",
2318 [RTN_ANYCAST] = "ANYCAST",
2319 [RTN_MULTICAST] = "MULTICAST",
2320 [RTN_BLACKHOLE] = "BLACKHOLE",
2321 [RTN_UNREACHABLE] = "UNREACHABLE",
2322 [RTN_PROHIBIT] = "PROHIBIT",
2323 [RTN_THROW] = "THROW",
2324 [RTN_NAT] = "NAT",
2325 [RTN_XRESOLVE] = "XRESOLVE",
2328 static inline const char *rtn_type(unsigned t)
2330 static char buf[32];
2332 if (t < __RTN_MAX && rtn_type_names[t])
2333 return rtn_type_names[t];
2334 snprintf(buf, sizeof(buf), "type %d", t);
2335 return buf;
2338 /* Pretty print the trie */
2339 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2341 const struct fib_trie_iter *iter = seq->private;
2342 struct node *n = v;
2344 if (v == SEQ_START_TOKEN)
2345 return 0;
2347 if (!node_parent(n)) {
2348 if (iter->trie == trie_local)
2349 seq_puts(seq, "<local>:\n");
2350 else
2351 seq_puts(seq, "<main>:\n");
2354 if (IS_TNODE(n)) {
2355 struct tnode *tn = (struct tnode *) n;
2356 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2358 seq_indent(seq, iter->depth-1);
2359 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2360 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2361 tn->empty_children);
2363 } else {
2364 struct leaf *l = (struct leaf *) n;
2365 int i;
2366 __be32 val = htonl(l->key);
2368 seq_indent(seq, iter->depth);
2369 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2370 for (i = 32; i >= 0; i--) {
2371 struct leaf_info *li = find_leaf_info(l, i);
2372 if (li) {
2373 struct fib_alias *fa;
2374 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2375 seq_indent(seq, iter->depth+1);
2376 seq_printf(seq, " /%d %s %s", i,
2377 rtn_scope(fa->fa_scope),
2378 rtn_type(fa->fa_type));
2379 if (fa->fa_tos)
2380 seq_printf(seq, "tos =%d\n",
2381 fa->fa_tos);
2382 seq_putc(seq, '\n');
2388 return 0;
2391 static const struct seq_operations fib_trie_seq_ops = {
2392 .start = fib_trie_seq_start,
2393 .next = fib_trie_seq_next,
2394 .stop = fib_trie_seq_stop,
2395 .show = fib_trie_seq_show,
2398 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2400 return seq_open_private(file, &fib_trie_seq_ops,
2401 sizeof(struct fib_trie_iter));
2404 static const struct file_operations fib_trie_fops = {
2405 .owner = THIS_MODULE,
2406 .open = fib_trie_seq_open,
2407 .read = seq_read,
2408 .llseek = seq_lseek,
2409 .release = seq_release_private,
2412 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2414 static unsigned type2flags[RTN_MAX + 1] = {
2415 [7] = RTF_REJECT, [8] = RTF_REJECT,
2417 unsigned flags = type2flags[type];
2419 if (fi && fi->fib_nh->nh_gw)
2420 flags |= RTF_GATEWAY;
2421 if (mask == htonl(0xFFFFFFFF))
2422 flags |= RTF_HOST;
2423 flags |= RTF_UP;
2424 return flags;
2428 * This outputs /proc/net/route.
2429 * The format of the file is not supposed to be changed
2430 * and needs to be the same as the fib_hash output to avoid breaking
2431 * legacy utilities.
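/*
 * Illustrative sample line (made-up addresses, little-endian host), as it
 * would appear in /proc/net/route for 172.16.0.0/16 via 192.168.1.1:
 *
 * eth0	000010AC	0101A8C0	0003	0	0	0	0000FFFF	0	0	0
 */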
2433 static int fib_route_seq_show(struct seq_file *seq, void *v)
2435 const struct fib_trie_iter *iter = seq->private;
2436 struct leaf *l = v;
2437 int i;
2438 char bf[128];
2440 if (v == SEQ_START_TOKEN) {
2441 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2442 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2443 "\tWindow\tIRTT");
2444 return 0;
2447 if (iter->trie == trie_local)
2448 return 0;
2449 if (IS_TNODE(l))
2450 return 0;
2452 for (i=32; i>=0; i--) {
2453 struct leaf_info *li = find_leaf_info(l, i);
2454 struct fib_alias *fa;
2455 __be32 mask, prefix;
2457 if (!li)
2458 continue;
2460 mask = inet_make_mask(li->plen);
2461 prefix = htonl(l->key);
2463 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2464 const struct fib_info *fi = fa->fa_info;
2465 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2467 if (fa->fa_type == RTN_BROADCAST
2468 || fa->fa_type == RTN_MULTICAST)
2469 continue;
2471 if (fi)
2472 snprintf(bf, sizeof(bf),
2473 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2474 fi->fib_dev ? fi->fib_dev->name : "*",
2475 prefix,
2476 fi->fib_nh->nh_gw, flags, 0, 0,
2477 fi->fib_priority,
2478 mask,
2479 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2480 fi->fib_window,
2481 fi->fib_rtt >> 3);
2482 else
2483 snprintf(bf, sizeof(bf),
2484 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2485 prefix, 0, flags, 0, 0, 0,
2486 mask, 0, 0, 0);
2488 seq_printf(seq, "%-127s\n", bf);
2492 return 0;
2495 static const struct seq_operations fib_route_seq_ops = {
2496 .start = fib_trie_seq_start,
2497 .next = fib_trie_seq_next,
2498 .stop = fib_trie_seq_stop,
2499 .show = fib_route_seq_show,
2502 static int fib_route_seq_open(struct inode *inode, struct file *file)
2504 return seq_open_private(file, &fib_route_seq_ops,
2505 sizeof(struct fib_trie_iter));
2508 static const struct file_operations fib_route_fops = {
2509 .owner = THIS_MODULE,
2510 .open = fib_route_seq_open,
2511 .read = seq_read,
2512 .llseek = seq_lseek,
2513 .release = seq_release_private,
2516 int __init fib_proc_init(void)
2518 if (!proc_net_fops_create(&init_net, "fib_trie", S_IRUGO, &fib_trie_fops))
2519 goto out1;
2521 if (!proc_net_fops_create(&init_net, "fib_triestat", S_IRUGO, &fib_triestat_fops))
2522 goto out2;
2524 if (!proc_net_fops_create(&init_net, "route", S_IRUGO, &fib_route_fops))
2525 goto out3;
2527 return 0;
2529 out3:
2530 proc_net_remove(&init_net, "fib_triestat");
2531 out2:
2532 proc_net_remove(&init_net, "fib_trie");
2533 out1:
2534 return -ENOMEM;
2537 void __init fib_proc_exit(void)
2539 proc_net_remove(&init_net, "fib_trie");
2540 proc_net_remove(&init_net, "fib_triestat");
2541 proc_net_remove(&init_net, "route");
2544 #endif /* CONFIG_PROC_FS */