/*
 *	Linux INET6 implementation
 *	Forwarding Information Database
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	Yuji SEKIYA @USAGI:	Support default route on router node;
 *				remove ip6_null_entry from the top of
 *				routing table.
 *	Ville Nuorvala:		Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/lwtunnel.h>

#include <net/ip6_fib.h>
#include <net/ip6_route.h>

#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
#define RT6_TRACE(x...) pr_debug(x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif

static struct kmem_cache *fib6_node_kmem __read_mostly;

struct fib6_cleaner {
	struct fib6_walker w;
	struct net *net;
	int (*func)(struct rt6_info *, void *arg);
	int sernum;
	void *arg;
};

#ifdef CONFIG_IPV6_SUBTREES
#define FWS_INIT FWS_S
#else
#define FWS_INIT FWS_L
#endif

static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
static int fib6_walk(struct net *net, struct fib6_walker *w);
static int fib6_walk_continue(struct fib6_walker *w);

/*
 *	A routing update causes an increase of the serial number on the
 *	affected subtree. This allows for cached routes to be asynchronously
 *	tested when modifications are made to the destination cache as a
 *	result of redirects, path MTU changes, etc.
 */

static void fib6_gc_timer_cb(unsigned long arg);

#define FOR_WALKERS(net, w) \
	list_for_each_entry(w, &(net)->ipv6.fib6_walkers, lh)

static void fib6_walker_link(struct net *net, struct fib6_walker *w)
{
	write_lock_bh(&net->ipv6.fib6_walker_lock);
	list_add(&w->lh, &net->ipv6.fib6_walkers);
	write_unlock_bh(&net->ipv6.fib6_walker_lock);
}

static void fib6_walker_unlink(struct net *net, struct fib6_walker *w)
{
	write_lock_bh(&net->ipv6.fib6_walker_lock);
	list_del(&w->lh);
	write_unlock_bh(&net->ipv6.fib6_walker_lock);
}

static int fib6_new_sernum(struct net *net)
{
	int new, old;

	do {
		old = atomic_read(&net->ipv6.fib6_sernum);
		new = old < INT_MAX ? old + 1 : 1;
	} while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
				old, new) != old);
	return new;
}

enum {
	FIB6_NO_SERNUM_CHANGE = 0,
};

/*
 *	Auxiliary address test functions for the radix tree.
 *
 *	These assume a 32bit processor (although it will work on
 *	64bit processors)
 */

/*
 *	test bit
 */
#if defined(__LITTLE_ENDIAN)
# define BITOP_BE32_SWIZZLE	(0x1F & ~7)
#else
# define BITOP_BE32_SWIZZLE	0
#endif

static __be32 addr_bit_set(const void *token, int fn_bit)
{
	const __be32 *addr = token;
	/*
	 * Here,
	 *	1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
	 * is optimized version of
	 *	htonl(1 << ((~fn_bit)&0x1F))
	 * See include/asm-generic/bitops/le.h.
	 */
	return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
	       addr[fn_bit >> 5];
}

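/*
 * Worked example for addr_bit_set(): fn_bit counts from the most
 * significant bit of the address, so bit 0 is the first bit of the prefix.
 * For fn_bit == 0 on a little-endian host the mask above is
 * 1 << ((~0 ^ 0x18) & 0x1f) == 1 << 7 == 0x80, applied to addr[0];
 * that is the top bit of s6_addr[0], i.e. the same bit that
 * htonl(1 << 31) & addr[0] would select.
 */
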
static struct fib6_node *node_alloc(void)
{
	struct fib6_node *fn;

	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);

	return fn;
}

static void node_free(struct fib6_node *fn)
{
	kmem_cache_free(fib6_node_kmem, fn);
}

static void rt6_rcu_free(struct rt6_info *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}

static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
{
	int cpu;

	if (!non_pcpu_rt->rt6i_pcpu)
		return;

	for_each_possible_cpu(cpu) {
		struct rt6_info **ppcpu_rt;
		struct rt6_info *pcpu_rt;

		ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
		pcpu_rt = *ppcpu_rt;
		if (pcpu_rt) {
			rt6_rcu_free(pcpu_rt);
			*ppcpu_rt = NULL;
		}
	}

	non_pcpu_rt->rt6i_pcpu = NULL;
}

static void rt6_release(struct rt6_info *rt)
{
	if (atomic_dec_and_test(&rt->rt6i_ref)) {
		rt6_free_pcpu(rt);
		rt6_rcu_free(rt);
	}
}

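/*
 * Note on rt6_release(): dropping the last reference frees the per-cpu
 * copies and the route itself, but always via call_rcu() (dst_rcu_free),
 * so concurrent readers that still hold a pointer can finish with the
 * entry before the memory goes away.
 */
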
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
	unsigned int h;

	/*
	 * Initialize table lock at a single place to give lockdep a key,
	 * tables aren't visible prior to being linked to the list.
	 */
	rwlock_init(&tb->tb6_lock);

	h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);

	/*
	 * No protection necessary, this is the only list mutation
	 * operation, tables never disappear once they exist.
	 */
	hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
}

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
{
	struct fib6_table *table;

	table = kzalloc(sizeof(*table), GFP_ATOMIC);
	if (table) {
		table->tb6_id = id;
		table->tb6_root.leaf = net->ipv6.ip6_null_entry;
		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
		inet_peer_base_init(&table->tb6_peers);
	}

	return table;
}

struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	struct fib6_table *tb;

	if (id == 0)
		id = RT6_TABLE_MAIN;
	tb = fib6_get_table(net, id);
	if (tb)
		return tb;

	tb = fib6_alloc_table(net, id);
	if (tb)
		fib6_link_table(net, tb);

	return tb;
}

struct fib6_table *fib6_get_table(struct net *net, u32 id)
{
	struct fib6_table *tb;
	struct hlist_head *head;
	unsigned int h;

	if (id == 0)
		id = RT6_TABLE_MAIN;
	h = id & (FIB6_TABLE_HASHSZ - 1);
	rcu_read_lock();
	head = &net->ipv6.fib_table_hash[h];
	hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
		if (tb->tb6_id == id) {
			rcu_read_unlock();
			return tb;
		}
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(fib6_get_table);

static void __net_init fib6_tables_init(struct net *net)
{
	fib6_link_table(net, net->ipv6.fib6_main_tbl);
	fib6_link_table(net, net->ipv6.fib6_local_tbl);
}
#else

struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	return fib6_get_table(net, id);
}

struct fib6_table *fib6_get_table(struct net *net, u32 id)
{
	return net->ipv6.fib6_main_tbl;
}

struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
				   int flags, pol_lookup_t lookup)
{
	struct rt6_info *rt;

	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
	if (rt->rt6i_flags & RTF_REJECT &&
	    rt->dst.error == -EAGAIN) {
		ip6_rt_put(rt);
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	}

	return &rt->dst;
}

static void __net_init fib6_tables_init(struct net *net)
{
	fib6_link_table(net, net->ipv6.fib6_main_tbl);
}

#endif

static int fib6_dump_node(struct fib6_walker *w)
{
	int res;
	struct rt6_info *rt;

	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
		res = rt6_dump_route(rt, w->args);
		if (res < 0) {
			/* Frame is full, suspend walking */
			w->leaf = rt;
			return 1;
		}
	}
	w->leaf = NULL;
	return 0;
}

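/*
 * Note on fib6_dump_node(): returning 1 suspends the walk with w->leaf
 * pointing at the route that did not fit in the skb; fib6_dump_table()
 * resumes from that point via fib6_walk_continue() on the next netlink
 * dump call.
 */
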
static void fib6_dump_end(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct fib6_walker *w = (void *)cb->args[2];

	if (w) {
		if (cb->args[4]) {
			cb->args[4] = 0;
			fib6_walker_unlink(net, w);
		}
		cb->args[2] = 0;
		kfree(w);
	}
	cb->done = (void *)cb->args[3];
	cb->args[1] = 3;
}

static int fib6_dump_done(struct netlink_callback *cb)
{
	fib6_dump_end(cb);
	return cb->done ? cb->done(cb) : 0;
}

static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib6_walker *w;
	int res;

	w = (void *)cb->args[2];
	w->root = &table->tb6_root;

	if (cb->args[4] == 0) {
		w->count = 0;
		w->skip = 0;

		read_lock_bh(&table->tb6_lock);
		res = fib6_walk(net, w);
		read_unlock_bh(&table->tb6_lock);
		if (res > 0) {
			cb->args[4] = 1;
			cb->args[5] = w->root->fn_sernum;
		}
	} else {
		if (cb->args[5] != w->root->fn_sernum) {
			/* Begin at the root if the tree changed */
			cb->args[5] = w->root->fn_sernum;
			w->state = FWS_INIT;
			w->node = w->root;
			w->skip = w->count;
		} else
			w->skip = 0;

		read_lock_bh(&table->tb6_lock);
		res = fib6_walk_continue(w);
		read_unlock_bh(&table->tb6_lock);
		if (res <= 0) {
			fib6_walker_unlink(net, w);
			cb->args[4] = 0;
		}
	}

	return res;
}

static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct rt6_rtnl_dump_arg arg;
	struct fib6_walker *w;
	struct fib6_table *tb;
	struct hlist_head *head;
	int res = 0;

	s_h = cb->args[0];
	s_e = cb->args[1];

	w = (void *)cb->args[2];
	if (!w) {
		/* New dump:
		 *
		 * 1. hook callback destructor.
		 */
		cb->args[3] = (long)cb->done;
		cb->done = fib6_dump_done;

		/*
		 * 2. allocate and initialize walker.
		 */
		w = kzalloc(sizeof(*w), GFP_ATOMIC);
		if (!w)
			return -ENOMEM;
		w->func = fib6_dump_node;
		cb->args[2] = (long)w;
	}

	arg.skb = skb;
	arg.cb = cb;
	arg.net = net;
	w->args = &arg;

	rcu_read_lock();
	for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
			if (e < s_e)
				goto next;
			res = fib6_dump_table(tb, skb, cb);
			if (res != 0)
				goto out;
next:
			e++;
		}
	}
out:
	rcu_read_unlock();
	cb->args[1] = e;
	cb->args[0] = h;

	res = res < 0 ? res : skb->len;
	if (res <= 0)
		fib6_dump_end(cb);
	return res;
}

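/*
 * Layout of cb->args[] as used by inet6_dump_fib() and fib6_dump_table():
 *   args[0] - hash bucket currently being dumped
 *   args[1] - index of the table within that bucket
 *   args[2] - pointer to the kzalloc'ed fib6_walker
 *   args[3] - the original cb->done callback, restored by fib6_dump_end()
 *   args[4] - non-zero while the walker is linked (dump suspended mid-table)
 *   args[5] - fn_sernum of the table root when the walk was suspended
 */
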
/*
 *	Routing Table
 *
 *	return the appropriate node for a routing tree "add" operation
 *	by either creating and inserting or by returning an existing
 *	node.
 */

static struct fib6_node *fib6_add_1(struct fib6_node *root,
				    struct in6_addr *addr, int plen,
				    int offset, int allow_create,
				    int replace_required, int sernum)
{
	struct fib6_node *fn, *in, *ln;
	struct fib6_node *pn = NULL;
	struct rt6key *key;
	int bit;
	__be32 dir = 0;

	RT6_TRACE("fib6_add_1\n");

	/* insert node in tree */

	fn = root;

	do {
		key = (struct rt6key *)((u8 *)fn->leaf + offset);

		/*
		 *	Prefix match
		 */
		if (plen < fn->fn_bit ||
		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
			if (!allow_create) {
				if (replace_required) {
					pr_warn("Can't replace route, no match found\n");
					return ERR_PTR(-ENOENT);
				}
				pr_warn("NLM_F_CREATE should be set when creating new route\n");
			}
			goto insert_above;
		}

		/*
		 *	Exact match ?
		 */

		if (plen == fn->fn_bit) {
			/* clean up an intermediate node */
			if (!(fn->fn_flags & RTN_RTINFO)) {
				rt6_release(fn->leaf);
				fn->leaf = NULL;
			}

			fn->fn_sernum = sernum;

			return fn;
		}

		/*
		 *	We have more bits to go
		 */

		/* Try to walk down on tree. */
		fn->fn_sernum = sernum;
		dir = addr_bit_set(addr, fn->fn_bit);
		pn = fn;
		fn = dir ? fn->right : fn->left;
	} while (fn);

	if (!allow_create) {
		/* We should not create new node because
		 * NLM_F_REPLACE was specified without NLM_F_CREATE
		 * I assume it is safe to require NLM_F_CREATE when
		 * REPLACE flag is used! Later we may want to remove the
		 * check for replace_required, because according
		 * to netlink specification, NLM_F_CREATE
		 * MUST be specified if new route is created.
		 * That would keep IPv6 consistent with IPv4
		 */
		if (replace_required) {
			pr_warn("Can't replace route, no match found\n");
			return ERR_PTR(-ENOENT);
		}
		pr_warn("NLM_F_CREATE should be set when creating new route\n");
	}
	/*
	 *	We walked to the bottom of tree.
	 *	Create new leaf node without children.
	 */

	ln = node_alloc();

	if (!ln)
		return ERR_PTR(-ENOMEM);
	ln->fn_bit = plen;

	ln->parent = pn;
	ln->fn_sernum = sernum;

	if (dir)
		pn->right = ln;
	else
		pn->left = ln;

	return ln;

insert_above:
	/*
	 * split, since we don't have a common prefix anymore or
	 * we have a less significant route.
	 * we have to insert an intermediate node on the list;
	 * this new node will point to the one we need to create
	 * and to the current one
	 */

	pn = fn->parent;

	/* find 1st bit in difference between the 2 addrs.

	   See comment in __ipv6_addr_diff: bit may be an invalid value,
	   but if it is >= plen, the value is ignored in any case.
	 */

	bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));

	/*
	 *		(intermediate)[in]
	 *	          /	   \
	 *	(new leaf node)[ln] (old node)[fn]
	 */
	if (plen > bit) {
		in = node_alloc();
		ln = node_alloc();

		if (!in || !ln) {
			if (in)
				node_free(in);
			if (ln)
				node_free(ln);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * new intermediate node.
		 * RTN_RTINFO will
		 * be off, since an address that chooses one of
		 * the branches would not match less specific routes
		 * in the other branch
		 */

		in->fn_bit = bit;

		in->parent = pn;
		in->leaf = fn->leaf;
		atomic_inc(&in->leaf->rt6i_ref);

		in->fn_sernum = sernum;

		/* update parent pointer */
		if (dir)
			pn->right = in;
		else
			pn->left = in;

		ln->fn_bit = plen;

		ln->parent = in;
		fn->parent = in;

		ln->fn_sernum = sernum;

		if (addr_bit_set(addr, bit)) {
			in->right = ln;
			in->left = fn;
		} else {
			in->left = ln;
			in->right = fn;
		}
	} else { /* plen <= bit */

		/*
		 *		(new leaf node)[ln]
		 *	          /	   \
		 *	     (old node)[fn] NULL
		 */

		ln = node_alloc();

		if (!ln)
			return ERR_PTR(-ENOMEM);

		ln->fn_bit = plen;

		ln->parent = pn;

		ln->fn_sernum = sernum;

		if (dir)
			pn->right = ln;
		else
			pn->left = ln;

		if (addr_bit_set(&key->addr, plen))
			ln->right = fn;
		else
			ln->left = fn;

		fn->parent = ln;
	}
	return ln;
}

static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
{
	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
		RTF_GATEWAY;
}

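/*
 * Note on rt6_qualify_for_ecmp(): only routes that have a gateway and that
 * are neither autoconfigured (RTF_ADDRCONF) nor marked dynamic (RTF_DYNAMIC,
 * e.g. installed by redirects) may be grouped as ECMP siblings.
 */
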
static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
{
	int i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (test_bit(i, mxc->mx_valid))
			mp[i] = mxc->mx[i];
	}
}

static int fib6_commit_metrics(struct dst_entry *dst, struct mx6_config *mxc)
{
	if (!mxc->mx)
		return 0;

	if (dst->flags & DST_HOST) {
		u32 *mp = dst_metrics_write_ptr(dst);

		if (unlikely(!mp))
			return -ENOMEM;

		fib6_copy_metrics(mp, mxc);
	} else {
		dst_init_metrics(dst, mxc->mx, false);

		/* We've stolen mx now. */
		mxc->mx = NULL;
	}

	return 0;
}

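/*
 * Note on fib6_commit_metrics(): for DST_HOST routes the values are copied
 * into the dst's own metrics area; otherwise the caller's mx array is
 * installed directly and ownership of it moves to the dst, which is why
 * mxc->mx is cleared ("We've stolen mx now").
 */
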
static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
			  struct net *net)
{
	if (atomic_read(&rt->rt6i_ref) != 1) {
		/* This route is used as dummy address holder in some split
		 * nodes. It is not leaked, but it still holds other resources,
		 * which must be released in time. So, scan ascendant nodes
		 * and replace dummy references to this route with references
		 * to still alive ones.
		 */
		while (fn) {
			if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
				fn->leaf = fib6_find_prefix(net, fn);
				atomic_inc(&fn->leaf->rt6i_ref);
				rt6_release(rt);
			}
			fn = fn->parent;
		}
		/* No more references are possible at this point. */
		BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
	}
}

/*
 *	Insert routing information in a node.
 */

static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
			    struct nl_info *info, struct mx6_config *mxc)
{
	struct rt6_info *iter = NULL;
	struct rt6_info **ins;
	struct rt6_info **fallback_ins = NULL;
	int replace = (info->nlh &&
		       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
	int add = (!info->nlh ||
		   (info->nlh->nlmsg_flags & NLM_F_CREATE));
	int found = 0;
	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
	int err;

	ins = &fn->leaf;

	for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
		/*
		 *	Search for duplicates
		 */

		if (iter->rt6i_metric == rt->rt6i_metric) {
			/*
			 *	Same priority level
			 */
			if (info->nlh &&
			    (info->nlh->nlmsg_flags & NLM_F_EXCL))
				return -EEXIST;
			if (replace) {
				if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
					found++;
					break;
				}
				if (rt_can_ecmp)
					fallback_ins = fallback_ins ?: ins;
				goto next_iter;
			}

			if (iter->dst.dev == rt->dst.dev &&
			    iter->rt6i_idev == rt->rt6i_idev &&
			    ipv6_addr_equal(&iter->rt6i_gateway,
					    &rt->rt6i_gateway)) {
				if (rt->rt6i_nsiblings)
					rt->rt6i_nsiblings = 0;
				if (!(iter->rt6i_flags & RTF_EXPIRES))
					return -EEXIST;
				if (!(rt->rt6i_flags & RTF_EXPIRES))
					rt6_clean_expires(iter);
				else
					rt6_set_expires(iter, rt->dst.expires);
				iter->rt6i_pmtu = rt->rt6i_pmtu;
				return -EEXIST;
			}
			/* If we have the same destination and the same metric,
			 * but not the same gateway, then the route we try to
			 * add is a sibling of this route; increment our counter
			 * of siblings, and later we will add our route to the
			 * list.
			 * Only static routes (which don't have flag
			 * RTF_EXPIRES) are used for ECMPv6.
			 *
			 * To avoid a long list, we only add siblings if the
			 * route has a gateway.
			 */
			if (rt_can_ecmp &&
			    rt6_qualify_for_ecmp(iter))
				rt->rt6i_nsiblings++;
		}

		if (iter->rt6i_metric > rt->rt6i_metric)
			break;

next_iter:
		ins = &iter->dst.rt6_next;
	}

	if (fallback_ins && !found) {
		/* No ECMP-able route found, replace first non-ECMP one */
		ins = fallback_ins;
		iter = *ins;
		found++;
	}

	/* Reset round-robin state, if necessary */
	if (ins == &fn->leaf)
		fn->rr_ptr = NULL;

	/* Link this route to its ECMP siblings (same destination and metric). */
	if (rt->rt6i_nsiblings) {
		unsigned int rt6i_nsiblings;
		struct rt6_info *sibling, *temp_sibling;

		/* Find the first route that has the same metric */
		sibling = fn->leaf;
		while (sibling) {
			if (sibling->rt6i_metric == rt->rt6i_metric &&
			    rt6_qualify_for_ecmp(sibling)) {
				list_add_tail(&rt->rt6i_siblings,
					      &sibling->rt6i_siblings);
				break;
			}
			sibling = sibling->dst.rt6_next;
		}
		/* For each sibling in the list, increment the counter of
		 * siblings. BUG() if the counters do not match; the list of
		 * siblings is broken!
		 */
		rt6i_nsiblings = 0;
		list_for_each_entry_safe(sibling, temp_sibling,
					 &rt->rt6i_siblings, rt6i_siblings) {
			sibling->rt6i_nsiblings++;
			BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
			rt6i_nsiblings++;
		}
		BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
	}

	/*
	 *	insert node
	 */
	if (!replace) {
		if (!add)
			pr_warn("NLM_F_CREATE should be set when creating new route\n");

add:
		err = fib6_commit_metrics(&rt->dst, mxc);
		if (err)
			return err;

		rt->dst.rt6_next = iter;
		*ins = rt;
		rt->rt6i_node = fn;
		atomic_inc(&rt->rt6i_ref);
		inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
		info->nl_net->ipv6.rt6_stats->fib_rt_entries++;

		if (!(fn->fn_flags & RTN_RTINFO)) {
			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
			fn->fn_flags |= RTN_RTINFO;
		}

	} else {
		int nsiblings;

		if (!found) {
			if (add)
				goto add;
			pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
			return -ENOENT;
		}

		err = fib6_commit_metrics(&rt->dst, mxc);
		if (err)
			return err;

		*ins = rt;
		rt->rt6i_node = fn;
		rt->dst.rt6_next = iter->dst.rt6_next;
		atomic_inc(&rt->rt6i_ref);
		inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
		if (!(fn->fn_flags & RTN_RTINFO)) {
			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
			fn->fn_flags |= RTN_RTINFO;
		}
		nsiblings = iter->rt6i_nsiblings;
		fib6_purge_rt(iter, fn, info->nl_net);
		rt6_release(iter);

		if (nsiblings) {
			/* Replacing an ECMP route, remove all siblings */
			ins = &rt->dst.rt6_next;
			iter = *ins;
			while (iter) {
				if (rt6_qualify_for_ecmp(iter)) {
					*ins = iter->dst.rt6_next;
					fib6_purge_rt(iter, fn, info->nl_net);
					rt6_release(iter);
					nsiblings--;
				} else {
					ins = &iter->dst.rt6_next;
				}
				iter = *ins;
			}
			WARN_ON(nsiblings != 0);
		}
	}

	return 0;
}

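/*
 * Return values of fib6_add_rt2node():
 *   0        route inserted (or an existing one replaced)
 *   -EEXIST  an equivalent route already exists, or NLM_F_EXCL was set
 *   -ENOENT  NLM_F_REPLACE was requested but no matching route was found
 *   other    error propagated from fib6_commit_metrics()
 */
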
static void fib6_start_gc(struct net *net, struct rt6_info *rt)
{
	if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
	    (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
		mod_timer(&net->ipv6.ip6_fib_timer,
			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}

void fib6_force_start_gc(struct net *net)
{
	if (!timer_pending(&net->ipv6.ip6_fib_timer))
		mod_timer(&net->ipv6.ip6_fib_timer,
			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
}

/*
 *	Add routing information to the routing tree.
 *	<destination addr>/<source addr>
 *	with source addr info in sub-trees
 */

int fib6_add(struct fib6_node *root, struct rt6_info *rt,
	     struct nl_info *info, struct mx6_config *mxc)
{
	struct fib6_node *fn, *pn = NULL;
	int err = -ENOMEM;
	int allow_create = 1;
	int replace_required = 0;
	int sernum = fib6_new_sernum(info->nl_net);

	if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) &&
			 !atomic_read(&rt->dst.__refcnt)))
		return -EINVAL;

	if (info->nlh) {
		if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
			allow_create = 0;
		if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
			replace_required = 1;
	}
	if (!allow_create && !replace_required)
		pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");

	fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
			offsetof(struct rt6_info, rt6i_dst), allow_create,
			replace_required, sernum);
	if (IS_ERR(fn)) {
		err = PTR_ERR(fn);
		fn = NULL;
		goto out;
	}

	pn = fn;

#ifdef CONFIG_IPV6_SUBTREES
	if (rt->rt6i_src.plen) {
		struct fib6_node *sn;

		if (!fn->subtree) {
			struct fib6_node *sfn;

			/*
			 * Create subtree.
			 *
			 *	fn[main tree]
			 *	|
			 *	sfn[subtree root]
			 *	   \
			 *	    sn[new leaf node]
			 */

			/* Create subtree root node */
			sfn = node_alloc();
			if (!sfn)
				goto st_failure;

			sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
			atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
			sfn->fn_flags = RTN_ROOT;
			sfn->fn_sernum = sernum;

			/* Now add the first leaf node to new subtree */

			sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
					rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src),
					allow_create, replace_required, sernum);

			if (IS_ERR(sn)) {
				/* If it failed, discard the just-allocated
				   subtree root, and then (in st_failure) the
				   stale node in the main tree.
				 */
				node_free(sfn);
				err = PTR_ERR(sn);
				goto st_failure;
			}

			/* Now link new subtree to main tree */
			sfn->parent = fn;
			fn->subtree = sfn;
		} else {
			sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
					rt->rt6i_src.plen,
					offsetof(struct rt6_info, rt6i_src),
					allow_create, replace_required, sernum);

			if (IS_ERR(sn)) {
				err = PTR_ERR(sn);
				goto st_failure;
			}
		}

		if (!fn->leaf) {
			fn->leaf = rt;
			atomic_inc(&rt->rt6i_ref);
		}
		fn = sn;
	}
#endif

	err = fib6_add_rt2node(fn, rt, info, mxc);
	if (!err) {
		fib6_start_gc(info->nl_net, rt);
		if (!(rt->rt6i_flags & RTF_CACHE))
			fib6_prune_clones(info->nl_net, pn);
		rt->dst.flags &= ~DST_NOCACHE;
	}

out:
	if (err) {
#ifdef CONFIG_IPV6_SUBTREES
		/*
		 * If fib6_add_1 has cleared the old leaf pointer in the
		 * super-tree leaf node we have to find a new one for it.
		 */
		if (pn != fn && pn->leaf == rt) {
			pn->leaf = NULL;
			atomic_dec(&rt->rt6i_ref);
		}
		if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
			pn->leaf = fib6_find_prefix(info->nl_net, pn);
#if RT6_DEBUG >= 2
			if (!pn->leaf) {
				WARN_ON(pn->leaf == NULL);
				pn->leaf = info->nl_net->ipv6.ip6_null_entry;
			}
#endif
			atomic_inc(&pn->leaf->rt6i_ref);
		}
#endif
		if (!(rt->dst.flags & DST_NOCACHE))
			dst_free(&rt->dst);
	}
	return err;

#ifdef CONFIG_IPV6_SUBTREES
	/* Subtree creation failed, probably main tree node
	   is orphan. If it is, shoot it.
	 */
st_failure:
	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
		fib6_repair_tree(info->nl_net, fn);
	if (!(rt->dst.flags & DST_NOCACHE))
		dst_free(&rt->dst);
	return err;
#endif
}

/*
 *	Routing tree lookup
 *
 */

struct lookup_args {
	int			offset;		/* key offset on rt6_info	*/
	const struct in6_addr	*addr;		/* search key			*/
};

static struct fib6_node *fib6_lookup_1(struct fib6_node *root,
				       struct lookup_args *args)
{
	struct fib6_node *fn;
	__be32 dir;

	if (unlikely(args->offset == 0))
		return NULL;

	/*
	 *	Descend on a tree
	 */

	fn = root;

	for (;;) {
		struct fib6_node *next;

		dir = addr_bit_set(args->addr, fn->fn_bit);

		next = dir ? fn->right : fn->left;

		if (next) {
			fn = next;
			continue;
		}
		break;
	}

	while (fn) {
		if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
			struct rt6key *key;

			key = (struct rt6key *) ((u8 *) fn->leaf +
						 args->offset);

			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
				if (fn->subtree) {
					struct fib6_node *sfn;
					sfn = fib6_lookup_1(fn->subtree,
							    args + 1);
					if (!sfn)
						goto backtrack;
					fn = sfn;
				}
#endif
				if (fn->fn_flags & RTN_RTINFO)
					return fn;
			}
		}
#ifdef CONFIG_IPV6_SUBTREES
backtrack:
#endif
		if (fn->fn_flags & RTN_ROOT)
			break;

		fn = fn->parent;
	}

	return NULL;
}

struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct fib6_node *fn;
	struct lookup_args args[] = {
		{
			.offset = offsetof(struct rt6_info, rt6i_dst),
			.addr = daddr,
		},
#ifdef CONFIG_IPV6_SUBTREES
		{
			.offset = offsetof(struct rt6_info, rt6i_src),
			.addr = saddr,
		},
#endif
		{
			.offset = 0,	/* sentinel */
		}
	};

	fn = fib6_lookup_1(root, daddr ? args : args + 1);
	if (!fn || fn->fn_flags & RTN_TL_ROOT)
		fn = root;

	return fn;
}

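/*
 * Note on fib6_lookup(): fib6_lookup_1() first descends along the
 * destination bits and then backtracks towards the root until it reaches a
 * node that both matches the prefix and carries routes (RTN_RTINFO); that
 * backtracking is what makes the lookup fall back to less specific
 * prefixes.  A rough sketch of how a caller in ip6_route.c uses it, under
 * the table's read lock:
 *
 *	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 *
 * after which the caller picks a route from fn->leaf (e.g. via
 * rt6_device_match() or rt6_select()).
 */
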
/*
 *	Get node with specified destination prefix (and source prefix,
 *	if subtrees are used)
 */


static struct fib6_node *fib6_locate_1(struct fib6_node *root,
				       const struct in6_addr *addr,
				       int plen, int offset)
{
	struct fib6_node *fn;

	for (fn = root; fn ; ) {
		struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset);

		/*
		 *	Prefix match
		 */
		if (plen < fn->fn_bit ||
		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
			return NULL;

		if (plen == fn->fn_bit)
			return fn;

		/*
		 *	We have more bits to go
		 */
		if (addr_bit_set(addr, fn->fn_bit))
			fn = fn->right;
		else
			fn = fn->left;
	}

	return NULL;
}

struct fib6_node *fib6_locate(struct fib6_node *root,
			      const struct in6_addr *daddr, int dst_len,
			      const struct in6_addr *saddr, int src_len)
{
	struct fib6_node *fn;

	fn = fib6_locate_1(root, daddr, dst_len,
			   offsetof(struct rt6_info, rt6i_dst));

#ifdef CONFIG_IPV6_SUBTREES
	if (src_len) {
		WARN_ON(saddr == NULL);
		if (fn && fn->subtree)
			fn = fib6_locate_1(fn->subtree, saddr, src_len,
					   offsetof(struct rt6_info, rt6i_src));
	}
#endif

	if (fn && fn->fn_flags & RTN_RTINFO)
		return fn;

	return NULL;
}

/*
 *	Deletion
 *
 */

static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
{
	if (fn->fn_flags & RTN_ROOT)
		return net->ipv6.ip6_null_entry;

	while (fn) {
		if (fn->left)
			return fn->left->leaf;
		if (fn->right)
			return fn->right->leaf;

		fn = FIB6_SUBTREE(fn);
	}
	return NULL;
}

/*
 *	Called to trim the tree of intermediate nodes when possible. "fn"
 *	is the node we want to try and remove.
 */

static struct fib6_node *fib6_repair_tree(struct net *net,
					  struct fib6_node *fn)
{
	int children;
	int nstate;
	struct fib6_node *child, *pn;
	struct fib6_walker *w;
	int iter = 0;

	for (;;) {
		RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
		iter++;

		WARN_ON(fn->fn_flags & RTN_RTINFO);
		WARN_ON(fn->fn_flags & RTN_TL_ROOT);
		WARN_ON(fn->leaf);

		children = 0;
		child = NULL;
		if (fn->right)
			child = fn->right, children |= 1;
		if (fn->left)
			child = fn->left, children |= 2;

		if (children == 3 || FIB6_SUBTREE(fn)
#ifdef CONFIG_IPV6_SUBTREES
		    /* Subtree root (i.e. fn) may have one child */
		    || (children && fn->fn_flags & RTN_ROOT)
#endif
		    ) {
			fn->leaf = fib6_find_prefix(net, fn);
#if RT6_DEBUG >= 2
			if (!fn->leaf) {
				WARN_ON(!fn->leaf);
				fn->leaf = net->ipv6.ip6_null_entry;
			}
#endif
			atomic_inc(&fn->leaf->rt6i_ref);
			return fn->parent;
		}

		pn = fn->parent;
#ifdef CONFIG_IPV6_SUBTREES
		if (FIB6_SUBTREE(pn) == fn) {
			WARN_ON(!(fn->fn_flags & RTN_ROOT));
			FIB6_SUBTREE(pn) = NULL;
			nstate = FWS_L;
		} else {
			WARN_ON(fn->fn_flags & RTN_ROOT);
#endif
			if (pn->right == fn)
				pn->right = child;
			else if (pn->left == fn)
				pn->left = child;
#if RT6_DEBUG >= 2
			else
				WARN_ON(1);
#endif
			if (child)
				child->parent = pn;
			nstate = FWS_R;
#ifdef CONFIG_IPV6_SUBTREES
		}
#endif

		read_lock(&net->ipv6.fib6_walker_lock);
		FOR_WALKERS(net, w) {
			if (!child) {
				if (w->root == fn) {
					w->root = w->node = NULL;
					RT6_TRACE("W %p adjusted by delroot 1\n", w);
				} else if (w->node == fn) {
					RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
					w->node = pn;
					w->state = nstate;
				}
			} else {
				if (w->root == fn) {
					w->root = child;
					RT6_TRACE("W %p adjusted by delroot 2\n", w);
				}
				if (w->node == fn) {
					w->node = child;
					if (children&2) {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
					} else {
						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
						w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
					}
				}
			}
		}
		read_unlock(&net->ipv6.fib6_walker_lock);

		node_free(fn);
		if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
			return pn;

		rt6_release(pn->leaf);
		pn->leaf = NULL;
		fn = pn;
	}
}

static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
			   struct nl_info *info)
{
	struct fib6_walker *w;
	struct rt6_info *rt = *rtp;
	struct net *net = info->nl_net;

	RT6_TRACE("fib6_del_route\n");

	/* Unlink it */
	*rtp = rt->dst.rt6_next;
	rt->rt6i_node = NULL;
	net->ipv6.rt6_stats->fib_rt_entries--;
	net->ipv6.rt6_stats->fib_discarded_routes++;

	/* Reset round-robin state, if necessary */
	if (fn->rr_ptr == rt)
		fn->rr_ptr = NULL;

	/* Remove this entry from other siblings */
	if (rt->rt6i_nsiblings) {
		struct rt6_info *sibling, *next_sibling;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings, rt6i_siblings)
			sibling->rt6i_nsiblings--;
		rt->rt6i_nsiblings = 0;
		list_del_init(&rt->rt6i_siblings);
	}

	/* Adjust walkers */
	read_lock(&net->ipv6.fib6_walker_lock);
	FOR_WALKERS(net, w) {
		if (w->state == FWS_C && w->leaf == rt) {
			RT6_TRACE("walker %p adjusted by delroute\n", w);
			w->leaf = rt->dst.rt6_next;
			if (!w->leaf)
				w->state = FWS_U;
		}
	}
	read_unlock(&net->ipv6.fib6_walker_lock);

	rt->dst.rt6_next = NULL;

	/* If it was last route, expunge its radix tree node */
	if (!fn->leaf) {
		fn->fn_flags &= ~RTN_RTINFO;
		net->ipv6.rt6_stats->fib_route_nodes--;
		fn = fib6_repair_tree(net, fn);
	}

	fib6_purge_rt(rt, fn, net);

	inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
	rt6_release(rt);
}

int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
	struct net *net = info->nl_net;
	struct fib6_node *fn = rt->rt6i_node;
	struct rt6_info **rtp;

#if RT6_DEBUG >= 2
	if (rt->dst.obsolete > 0) {
		WARN_ON(fn);
		return -ENOENT;
	}
#endif
	if (!fn || rt == net->ipv6.ip6_null_entry)
		return -ENOENT;

	WARN_ON(!(fn->fn_flags & RTN_RTINFO));

	if (!(rt->rt6i_flags & RTF_CACHE)) {
		struct fib6_node *pn = fn;
#ifdef CONFIG_IPV6_SUBTREES
		/* clones of this route might be in another subtree */
		if (rt->rt6i_src.plen) {
			while (!(pn->fn_flags & RTN_ROOT))
				pn = pn->parent;
			pn = pn->parent;
		}
#endif
		fib6_prune_clones(info->nl_net, pn);
	}

	/*
	 *	Walk the leaf entries looking for ourself
	 */

	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) {
		if (*rtp == rt) {
			fib6_del_route(fn, rtp, info);
			return 0;
		}
	}
	return -ENOENT;
}

/*
 *	Tree traversal function.
 *
 *	Certainly, it is not interrupt safe.
 *	However, it is internally reentrant wrt itself and fib6_add/fib6_del.
 *	This means that we can modify the tree while walking it,
 *	and use this function for garbage collection, clone pruning,
 *	cleaning the tree when a device goes down, etc.
 *
 *	It guarantees that every node will be traversed,
 *	and that it will be traversed only once.
 *
 *	Callback function w->func may return:
 *	0 -> continue walking.
 *	positive value -> walking is suspended (used by tree dumps,
 *	and probably by gc, if it will be split to several slices)
 *	negative value -> terminate walking.
 *
 *	The function itself returns:
 *	0   -> walk is complete.
 *	>0  -> walk is incomplete (i.e. suspended)
 *	<0  -> walk is terminated by an error.
 */

static int fib6_walk_continue(struct fib6_walker *w)
{
	struct fib6_node *fn, *pn;

	for (;;) {
		fn = w->node;
		if (!fn)
			return 0;

		if (w->prune && fn != w->root &&
		    fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
			w->state = FWS_C;
			w->leaf = fn->leaf;
		}
		switch (w->state) {
#ifdef CONFIG_IPV6_SUBTREES
		case FWS_S:
			if (FIB6_SUBTREE(fn)) {
				w->node = FIB6_SUBTREE(fn);
				continue;
			}
			w->state = FWS_L;
#endif
		case FWS_L:
			if (fn->left) {
				w->node = fn->left;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_R;
		case FWS_R:
			if (fn->right) {
				w->node = fn->right;
				w->state = FWS_INIT;
				continue;
			}
			w->state = FWS_C;
			w->leaf = fn->leaf;
		case FWS_C:
			if (w->leaf && fn->fn_flags & RTN_RTINFO) {
				int err;

				if (w->skip) {
					w->skip--;
					goto skip;
				}

				err = w->func(w);
				if (err)
					return err;

				w->count++;
				continue;
			}
skip:
			w->state = FWS_U;
		case FWS_U:
			if (fn == w->root)
				return 0;
			pn = fn->parent;
			w->node = pn;
#ifdef CONFIG_IPV6_SUBTREES
			if (FIB6_SUBTREE(pn) == fn) {
				WARN_ON(!(fn->fn_flags & RTN_ROOT));
				w->state = FWS_L;
				continue;
			}
#endif
			if (pn->left == fn) {
				w->state = FWS_R;
				continue;
			}
			if (pn->right == fn) {
				w->state = FWS_C;
				w->leaf = w->node->leaf;
				continue;
			}
#if RT6_DEBUG >= 2
			WARN_ON(1);
#endif
		}
	}
}

static int fib6_walk(struct net *net, struct fib6_walker *w)
{
	int res;

	w->state = FWS_INIT;
	w->node = w->root;

	fib6_walker_link(net, w);
	res = fib6_walk_continue(w);
	if (res <= 0)
		fib6_walker_unlink(net, w);
	return res;
}

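/*
 * Note on fib6_walk(): the walker stays linked on net->ipv6.fib6_walkers
 * for the duration of a suspended walk, which is what allows
 * fib6_del_route() and fib6_repair_tree() to adjust w->node/w->leaf/w->state
 * when the nodes or routes being walked are removed underneath it.
 */
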
static int fib6_clean_node(struct fib6_walker *w)
{
	int res;
	struct rt6_info *rt;
	struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w);
	struct nl_info info = {
		.nl_net = c->net,
	};

	if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
	    w->node->fn_sernum != c->sernum)
		w->node->fn_sernum = c->sernum;

	if (!c->func) {
		WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
		w->leaf = NULL;
		return 0;
	}

	for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
		res = c->func(rt, c->arg);
		if (res < 0) {
			w->leaf = rt;
			res = fib6_del(rt, &info);
			if (res) {
#if RT6_DEBUG >= 2
				pr_debug("%s: del failed: rt=%p@%p err=%d\n",
					 __func__, rt, rt->rt6i_node, res);
#endif
				continue;
			}
			return 0;
		}
		WARN_ON(res != 0);
	}
	w->leaf = rt;
	return 0;
}

/*
 *	Convenient frontend to tree walker.
 *
 *	func is called on each route.
 *		It may return -1 -> delete this route.
 *		              0  -> continue walking
 *
 *	prune==1 -> only immediate children of node (certainly,
 *	ignoring pure split nodes) will be scanned.
 */

static void fib6_clean_tree(struct net *net, struct fib6_node *root,
			    int (*func)(struct rt6_info *, void *arg),
			    bool prune, int sernum, void *arg)
{
	struct fib6_cleaner c;

	c.w.root = root;
	c.w.func = fib6_clean_node;
	c.w.prune = prune;
	c.w.count = 0;
	c.w.skip = 0;
	c.func = func;
	c.sernum = sernum;
	c.arg = arg;
	c.net = net;

	fib6_walk(net, &c.w);
}

static void __fib6_clean_all(struct net *net,
			     int (*func)(struct rt6_info *, void *),
			     int sernum, void *arg)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();
	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			write_lock_bh(&table->tb6_lock);
			fib6_clean_tree(net, &table->tb6_root,
					func, false, sernum, arg);
			write_unlock_bh(&table->tb6_lock);
		}
	}
	rcu_read_unlock();
}

void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *),
		    void *arg)
{
	__fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg);
}

static int fib6_prune_clone(struct rt6_info *rt, void *arg)
{
	if (rt->rt6i_flags & RTF_CACHE) {
		RT6_TRACE("pruning clone %p\n", rt);
		return -1;
	}

	return 0;
}

static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
{
	fib6_clean_tree(net, fn, fib6_prune_clone, true,
			FIB6_NO_SERNUM_CHANGE, NULL);
}

static void fib6_flush_trees(struct net *net)
{
	int new_sernum = fib6_new_sernum(net);

	__fib6_clean_all(net, NULL, new_sernum, NULL);
}

/*
 *	Garbage collection
 */

struct fib6_gc_args
{
	int			timeout;
	int			more;
};

static int fib6_age(struct rt6_info *rt, void *arg)
{
	struct fib6_gc_args *gc_args = arg;
	unsigned long now = jiffies;

	/*
	 *	check addrconf expiration here.
	 *	Routes are expired even if they are in use.
	 *
	 *	Also age clones. Note, that clones are aged out
	 *	only if they are not in use now.
	 */

	if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
		if (time_after(now, rt->dst.expires)) {
			RT6_TRACE("expiring %p\n", rt);
			return -1;
		}
		gc_args->more++;
	} else if (rt->rt6i_flags & RTF_CACHE) {
		if (atomic_read(&rt->dst.__refcnt) == 0 &&
		    time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			return -1;
		} else if (rt->rt6i_flags & RTF_GATEWAY) {
			struct neighbour *neigh;
			__u8 neigh_flags = 0;

			neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
			if (neigh) {
				neigh_flags = neigh->flags;
				neigh_release(neigh);
			}
			if (!(neigh_flags & NTF_ROUTER)) {
				RT6_TRACE("purging route %p via non-router but gateway\n",
					  rt);
				return -1;
			}
		}
		gc_args->more++;
	}

	return 0;
}

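/*
 * Aging rules applied by fib6_age(): routes with RTF_EXPIRES are dropped
 * once their expiry time has passed, even if still in use; RTF_CACHE clones
 * are dropped when unreferenced and idle longer than the GC timeout, and
 * cached routes via a gateway are also dropped once the gateway's neighbour
 * entry no longer has NTF_ROUTER set.  Surviving entries of either kind
 * bump gc_args->more so that fib6_run_gc() re-arms the GC timer.
 */
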
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
	struct fib6_gc_args gc_args;
	unsigned long now;

	if (force) {
		spin_lock_bh(&net->ipv6.fib6_gc_lock);
	} else if (!spin_trylock_bh(&net->ipv6.fib6_gc_lock)) {
		mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
		return;
	}
	gc_args.timeout = expires ? (int)expires :
			  net->ipv6.sysctl.ip6_rt_gc_interval;

	gc_args.more = icmp6_dst_gc();

	fib6_clean_all(net, fib6_age, &gc_args);
	now = jiffies;
	net->ipv6.ip6_rt_last_gc = now;

	if (gc_args.more)
		mod_timer(&net->ipv6.ip6_fib_timer,
			  round_jiffies(now
					+ net->ipv6.sysctl.ip6_rt_gc_interval));
	else
		del_timer(&net->ipv6.ip6_fib_timer);
	spin_unlock_bh(&net->ipv6.fib6_gc_lock);
}

static void fib6_gc_timer_cb(unsigned long arg)
{
	fib6_run_gc(0, (struct net *)arg, true);
}

static int __net_init fib6_net_init(struct net *net)
{
	size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ;

	spin_lock_init(&net->ipv6.fib6_gc_lock);
	rwlock_init(&net->ipv6.fib6_walker_lock);
	INIT_LIST_HEAD(&net->ipv6.fib6_walkers);
	setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);

	net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
	if (!net->ipv6.rt6_stats)
		goto out_timer;

	/* Avoid false sharing : Use at least a full cache line */
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv6.fib_table_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv6.fib_table_hash)
		goto out_rt6_stats;

	net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
					  GFP_KERNEL);
	if (!net->ipv6.fib6_main_tbl)
		goto out_fib_table_hash;

	net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
	net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
	inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
					   GFP_KERNEL);
	if (!net->ipv6.fib6_local_tbl)
		goto out_fib6_main_tbl;
	net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
	net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
	net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
		RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
	inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
#endif
	fib6_tables_init(net);

	return 0;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_fib6_main_tbl:
	kfree(net->ipv6.fib6_main_tbl);
#endif
out_fib_table_hash:
	kfree(net->ipv6.fib_table_hash);
out_rt6_stats:
	kfree(net->ipv6.rt6_stats);
out_timer:
	return -ENOMEM;
}

static void fib6_net_exit(struct net *net)
{
	rt6_ifdown(net, NULL);
	del_timer_sync(&net->ipv6.ip6_fib_timer);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
	kfree(net->ipv6.fib6_local_tbl);
#endif
	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
	kfree(net->ipv6.fib6_main_tbl);
	kfree(net->ipv6.fib_table_hash);
	kfree(net->ipv6.rt6_stats);
}

static struct pernet_operations fib6_net_ops = {
	.init = fib6_net_init,
	.exit = fib6_net_exit,
};

int __init fib6_init(void)
{
	int ret = -ENOMEM;

	fib6_node_kmem = kmem_cache_create("fib6_nodes",
					   sizeof(struct fib6_node),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!fib6_node_kmem)
		goto out;

	ret = register_pernet_subsys(&fib6_net_ops);
	if (ret)
		goto out_kmem_cache_create;

	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
			      NULL);
	if (ret)
		goto out_unregister_subsys;

	__fib6_flush_trees = fib6_flush_trees;
out:
	return ret;

out_unregister_subsys:
	unregister_pernet_subsys(&fib6_net_ops);
out_kmem_cache_create:
	kmem_cache_destroy(fib6_node_kmem);
	goto out;
}

void fib6_gc_cleanup(void)
{
	unregister_pernet_subsys(&fib6_net_ops);
	kmem_cache_destroy(fib6_node_kmem);
}

#ifdef CONFIG_PROC_FS

struct ipv6_route_iter {
	struct seq_net_private p;
	struct fib6_walker w;
	loff_t skip;
	struct fib6_table *tbl;
	int sernum;
};

static int ipv6_route_seq_show(struct seq_file *seq, void *v)
{
	struct rt6_info *rt = v;
	struct ipv6_route_iter *iter = seq->private;

	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);

#ifdef CONFIG_IPV6_SUBTREES
	seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
#else
	seq_puts(seq, "00000000000000000000000000000000 00 ");
#endif
	if (rt->rt6i_flags & RTF_GATEWAY)
		seq_printf(seq, "%pi6", &rt->rt6i_gateway);
	else
		seq_puts(seq, "00000000000000000000000000000000");

	seq_printf(seq, " %08x %08x %08x %08x %8s\n",
		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
		   rt->dst.__use, rt->rt6i_flags,
		   rt->dst.dev ? rt->dst.dev->name : "");
	iter->w.leaf = NULL;
	return 0;
}

static int ipv6_route_yield(struct fib6_walker *w)
{
	struct ipv6_route_iter *iter = w->args;

	if (!iter->skip)
		return 1;

	do {
		iter->w.leaf = iter->w.leaf->dst.rt6_next;
		iter->skip--;
		if (!iter->skip && iter->w.leaf)
			return 1;
	} while (iter->w.leaf);

	return 0;
}

static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
				      struct net *net)
{
	memset(&iter->w, 0, sizeof(iter->w));
	iter->w.func = ipv6_route_yield;
	iter->w.root = &iter->tbl->tb6_root;
	iter->w.state = FWS_INIT;
	iter->w.node = iter->w.root;
	iter->w.args = iter;
	iter->sernum = iter->w.root->fn_sernum;
	INIT_LIST_HEAD(&iter->w.lh);
	fib6_walker_link(net, &iter->w);
}

static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
						    struct net *net)
{
	unsigned int h;
	struct hlist_node *node;

	if (tbl) {
		h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
		node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
	} else {
		h = 0;
		node = NULL;
	}

	while (!node && h < FIB6_TABLE_HASHSZ) {
		node = rcu_dereference_bh(
			hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
	}
	return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
}

static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
{
	if (iter->sernum != iter->w.root->fn_sernum) {
		iter->sernum = iter->w.root->fn_sernum;
		iter->w.state = FWS_INIT;
		iter->w.node = iter->w.root;
		WARN_ON(iter->w.skip);
		iter->w.skip = iter->w.count;
	}
}

static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int r;
	struct rt6_info *n;
	struct net *net = seq_file_net(seq);
	struct ipv6_route_iter *iter = seq->private;

	if (!v)
		goto iter_table;

	n = ((struct rt6_info *)v)->dst.rt6_next;
	if (n) {
		++*pos;
		return n;
	}

iter_table:
	ipv6_route_check_sernum(iter);
	read_lock(&iter->tbl->tb6_lock);
	r = fib6_walk_continue(&iter->w);
	read_unlock(&iter->tbl->tb6_lock);
	if (r > 0) {
		if (v)
			++*pos;
		return iter->w.leaf;
	} else if (r < 0) {
		fib6_walker_unlink(net, &iter->w);
		return NULL;
	}
	fib6_walker_unlink(net, &iter->w);

	iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
	if (!iter->tbl)
		return NULL;

	ipv6_route_seq_setup_walk(iter, net);
	goto iter_table;
}

static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU_BH)
{
	struct net *net = seq_file_net(seq);
	struct ipv6_route_iter *iter = seq->private;

	rcu_read_lock_bh();
	iter->tbl = ipv6_route_seq_next_table(NULL, net);
	iter->skip = *pos;

	if (iter->tbl) {
		ipv6_route_seq_setup_walk(iter, net);
		return ipv6_route_seq_next(seq, NULL, pos);
	} else {
		return NULL;
	}
}

static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
{
	struct fib6_walker *w = &iter->w;
	return w->node && !(w->state == FWS_U && w->node == w->root);
}

static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU_BH)
{
	struct net *net = seq_file_net(seq);
	struct ipv6_route_iter *iter = seq->private;

	if (ipv6_route_iter_active(iter))
		fib6_walker_unlink(net, &iter->w);

	rcu_read_unlock_bh();
}

static const struct seq_operations ipv6_route_seq_ops = {
	.start	= ipv6_route_seq_start,
	.next	= ipv6_route_seq_next,
	.stop	= ipv6_route_seq_stop,
	.show	= ipv6_route_seq_show
};

int ipv6_route_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipv6_route_seq_ops,
			    sizeof(struct ipv6_route_iter));
}

#endif	/* CONFIG_PROC_FS */