net: ipv6: RTF_PCPU should not be settable from userspace
[linux/fpc-iii.git] / net / ipv6 / route.c
blob 6c91d5c4a92cec77588758d209c4b4a39625578a
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 /* Changes:
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
27 #define pr_fmt(fmt) "IPv6: " fmt
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/dst_metadata.h>
58 #include <net/xfrm.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/nexthop.h>
62 #include <net/lwtunnel.h>
63 #include <net/ip_tunnels.h>
64 #include <net/l3mdev.h>
66 #include <asm/uaccess.h>
68 #ifdef CONFIG_SYSCTL
69 #include <linux/sysctl.h>
70 #endif
/* Neighbour-reachability verdicts used when scoring candidate routes;
 * negative values are failures (hard, probe-needed, round-robin).
 */
72 enum rt6_nud_state {
73 RT6_NUD_FAIL_HARD = -3,
74 RT6_NUD_FAIL_PROBE = -2,
75 RT6_NUD_FAIL_DO_RR = -1,
76 RT6_NUD_SUCCEED = 1
79 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
80 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
81 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
82 static unsigned int ip6_mtu(const struct dst_entry *dst);
83 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
84 static void ip6_dst_destroy(struct dst_entry *);
85 static void ip6_dst_ifdown(struct dst_entry *,
86 struct net_device *dev, int how);
87 static int ip6_dst_gc(struct dst_ops *ops);
89 static int ip6_pkt_discard(struct sk_buff *skb);
90 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
91 static int ip6_pkt_prohibit(struct sk_buff *skb);
92 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
93 static void ip6_link_failure(struct sk_buff *skb);
94 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
95 struct sk_buff *skb, u32 mtu);
96 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
97 struct sk_buff *skb);
98 static void rt6_dst_from_metrics_check(struct rt6_info *rt);
99 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
101 #ifdef CONFIG_IPV6_ROUTE_INFO
102 static struct rt6_info *rt6_add_route_info(struct net *net,
103 const struct in6_addr *prefix, int prefixlen,
104 const struct in6_addr *gwaddr, int ifindex,
105 unsigned int pref);
106 static struct rt6_info *rt6_get_route_info(struct net *net,
107 const struct in6_addr *prefix, int prefixlen,
108 const struct in6_addr *gwaddr, int ifindex);
109 #endif
/* Per-cpu list of uncached (DST_NOCACHE) routes; ->lock protects ->head. */
111 struct uncached_list {
112 spinlock_t lock;
113 struct list_head head;
116 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
118 static void rt6_uncached_list_add(struct rt6_info *rt)
120 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
122 rt->dst.flags |= DST_NOCACHE;
123 rt->rt6i_uncached_list = ul;
125 spin_lock_bh(&ul->lock);
126 list_add_tail(&rt->rt6i_uncached, &ul->head);
127 spin_unlock_bh(&ul->lock);
130 static void rt6_uncached_list_del(struct rt6_info *rt)
132 if (!list_empty(&rt->rt6i_uncached)) {
133 struct uncached_list *ul = rt->rt6i_uncached_list;
135 spin_lock_bh(&ul->lock);
136 list_del(&rt->rt6i_uncached);
137 spin_unlock_bh(&ul->lock);
/* Re-home every uncached route referencing @dev (either via its idev or
 * its dst device) onto the loopback device of @net, so @dev can go away.
 * Walks every CPU's uncached list under that list's lock.
 */
141 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
143 struct net_device *loopback_dev = net->loopback_dev;
144 int cpu;
146 if (dev == loopback_dev)
147 return;
149 for_each_possible_cpu(cpu) {
150 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
151 struct rt6_info *rt;
153 spin_lock_bh(&ul->lock);
154 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
155 struct inet6_dev *rt_idev = rt->rt6i_idev;
156 struct net_device *rt_dev = rt->dst.dev;
158 if (rt_idev->dev == dev) {
159 rt->rt6i_idev = in6_dev_get(loopback_dev);
160 in6_dev_put(rt_idev);
163 if (rt_dev == dev) {
164 rt->dst.dev = loopback_dev;
165 dev_hold(rt->dst.dev);
166 dev_put(rt_dev);
169 spin_unlock_bh(&ul->lock);
173 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
175 return dst_metrics_write_ptr(rt->dst.from);
178 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
180 struct rt6_info *rt = (struct rt6_info *)dst;
182 if (rt->rt6i_flags & RTF_PCPU)
183 return rt6_pcpu_cow_metrics(rt);
184 else if (rt->rt6i_flags & RTF_CACHE)
185 return NULL;
186 else
187 return dst_cow_metrics_generic(dst, old);
190 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
191 struct sk_buff *skb,
192 const void *daddr)
194 struct in6_addr *p = &rt->rt6i_gateway;
196 if (!ipv6_addr_any(p))
197 return (const void *) p;
198 else if (skb)
199 return &ipv6_hdr(skb)->daddr;
200 return daddr;
203 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
204 struct sk_buff *skb,
205 const void *daddr)
207 struct rt6_info *rt = (struct rt6_info *) dst;
208 struct neighbour *n;
210 daddr = choose_neigh_daddr(rt, skb, daddr);
211 n = __ipv6_neigh_lookup(dst->dev, daddr);
212 if (n)
213 return n;
214 return neigh_create(&nd_tbl, daddr, dst->dev);
/* dst_ops used for regular IPv6 routes; copied per netns at init. */
217 static struct dst_ops ip6_dst_ops_template = {
218 .family = AF_INET6,
219 .gc = ip6_dst_gc,
220 .gc_thresh = 1024,
221 .check = ip6_dst_check,
222 .default_advmss = ip6_default_advmss,
223 .mtu = ip6_mtu,
224 .cow_metrics = ipv6_cow_metrics,
225 .destroy = ip6_dst_destroy,
226 .ifdown = ip6_dst_ifdown,
227 .negative_advice = ip6_negative_advice,
228 .link_failure = ip6_link_failure,
229 .update_pmtu = ip6_rt_update_pmtu,
230 .redirect = rt6_do_redirect,
231 .local_out = __ip6_local_out,
232 .neigh_lookup = ip6_neigh_lookup,
235 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
237 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
239 return mtu ? : dst->dev->mtu;
242 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
243 struct sk_buff *skb, u32 mtu)
247 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
248 struct sk_buff *skb)
/* dst_ops for blackhole routes (per-socket dst copies): no gc, no pmtu. */
252 static struct dst_ops ip6_dst_blackhole_ops = {
253 .family = AF_INET6,
254 .destroy = ip6_dst_destroy,
255 .check = ip6_dst_check,
256 .mtu = ip6_blackhole_mtu,
257 .default_advmss = ip6_default_advmss,
258 .update_pmtu = ip6_rt_blackhole_update_pmtu,
259 .redirect = ip6_rt_blackhole_redirect,
260 .cow_metrics = dst_cow_metrics_generic,
261 .neigh_lookup = ip6_neigh_lookup,
/* Template metrics and the always-present error-route templates; these
 * are copied into each network namespace at init time.
 */
264 static const u32 ip6_template_metrics[RTAX_MAX] = {
265 [RTAX_HOPLIMIT - 1] = 0,
/* Catch-all "no route": drops packets with -ENETUNREACH. */
268 static const struct rt6_info ip6_null_entry_template = {
269 .dst = {
270 .__refcnt = ATOMIC_INIT(1),
271 .__use = 1,
272 .obsolete = DST_OBSOLETE_FORCE_CHK,
273 .error = -ENETUNREACH,
274 .input = ip6_pkt_discard,
275 .output = ip6_pkt_discard_out,
277 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
278 .rt6i_protocol = RTPROT_KERNEL,
279 .rt6i_metric = ~(u32) 0,
280 .rt6i_ref = ATOMIC_INIT(1),
283 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
/* Policy-routing "prohibit" action: rejects with -EACCES. */
285 static const struct rt6_info ip6_prohibit_entry_template = {
286 .dst = {
287 .__refcnt = ATOMIC_INIT(1),
288 .__use = 1,
289 .obsolete = DST_OBSOLETE_FORCE_CHK,
290 .error = -EACCES,
291 .input = ip6_pkt_prohibit,
292 .output = ip6_pkt_prohibit_out,
294 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
295 .rt6i_protocol = RTPROT_KERNEL,
296 .rt6i_metric = ~(u32) 0,
297 .rt6i_ref = ATOMIC_INIT(1),
/* Policy-routing "blackhole" action: silently discards (-EINVAL). */
300 static const struct rt6_info ip6_blk_hole_entry_template = {
301 .dst = {
302 .__refcnt = ATOMIC_INIT(1),
303 .__use = 1,
304 .obsolete = DST_OBSOLETE_FORCE_CHK,
305 .error = -EINVAL,
306 .input = dst_discard,
307 .output = dst_discard_out,
309 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
310 .rt6i_protocol = RTPROT_KERNEL,
311 .rt6i_metric = ~(u32) 0,
312 .rt6i_ref = ATOMIC_INIT(1),
315 #endif
317 static void rt6_info_init(struct rt6_info *rt)
319 struct dst_entry *dst = &rt->dst;
321 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
322 INIT_LIST_HEAD(&rt->rt6i_siblings);
323 INIT_LIST_HEAD(&rt->rt6i_uncached);
326 /* allocate dst with ip6_dst_ops */
327 static struct rt6_info *__ip6_dst_alloc(struct net *net,
328 struct net_device *dev,
329 int flags)
331 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
332 0, DST_OBSOLETE_FORCE_CHK, flags);
334 if (rt)
335 rt6_info_init(rt);
337 return rt;
340 static struct rt6_info *ip6_dst_alloc(struct net *net,
341 struct net_device *dev,
342 int flags)
344 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
346 if (rt) {
347 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
348 if (rt->rt6i_pcpu) {
349 int cpu;
351 for_each_possible_cpu(cpu) {
352 struct rt6_info **p;
354 p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
355 /* no one shares rt */
356 *p = NULL;
358 } else {
359 dst_destroy((struct dst_entry *)rt);
360 return NULL;
364 return rt;
/* dst_ops::destroy — release everything a rt6_info owns: metrics, the
 * per-cpu clone array, its uncached-list membership, the idev reference,
 * and finally the parent dst referenced via ->from.
 */
367 static void ip6_dst_destroy(struct dst_entry *dst)
369 struct rt6_info *rt = (struct rt6_info *)dst;
370 struct dst_entry *from = dst->from;
371 struct inet6_dev *idev;
373 dst_destroy_metrics_generic(dst);
374 free_percpu(rt->rt6i_pcpu);
375 rt6_uncached_list_del(rt);
377 idev = rt->rt6i_idev;
378 if (idev) {
379 rt->rt6i_idev = NULL;
380 in6_dev_put(idev);
383 dst->from = NULL;
384 dst_release(from);
/* dst_ops::ifdown — @dev is going down: move this route's idev reference
 * over to the loopback device of @dev's netns.
 */
387 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
388 int how)
390 struct rt6_info *rt = (struct rt6_info *)dst;
391 struct inet6_dev *idev = rt->rt6i_idev;
392 struct net_device *loopback_dev =
393 dev_net(dev)->loopback_dev;
395 if (dev != loopback_dev) {
396 if (idev && idev->dev == dev) {
397 struct inet6_dev *loopback_idev =
398 in6_dev_get(loopback_dev);
399 if (loopback_idev) {
400 rt->rt6i_idev = loopback_idev;
401 in6_dev_put(idev);
407 static bool __rt6_check_expired(const struct rt6_info *rt)
409 if (rt->rt6i_flags & RTF_EXPIRES)
410 return time_after(jiffies, rt->dst.expires);
411 else
412 return false;
415 static bool rt6_check_expired(const struct rt6_info *rt)
417 if (rt->rt6i_flags & RTF_EXPIRES) {
418 if (time_after(jiffies, rt->dst.expires))
419 return true;
420 } else if (rt->dst.from) {
421 return rt6_check_expired((struct rt6_info *) rt->dst.from);
423 return false;
/* Multipath route selection:
 * Hash based function using packet header and flowlabel.
 * Adapted from fib_info_hashfn()
 */
static int rt6_info_hash_nhsfn(unsigned int candidate_count,
			       const struct flowi6 *fl6)
{
	unsigned int hash = get_hash_from_flowi6(fl6);

	return hash % candidate_count;
}
/* Pick one route among @match and its ECMP siblings using the flow hash;
 * index 0 keeps @match, a positive index walks the sibling list.  A
 * sibling with a negative score is rejected and @match is kept.
 */
436 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
437 struct flowi6 *fl6, int oif,
438 int strict)
440 struct rt6_info *sibling, *next_sibling;
441 int route_choosen;
443 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
444 /* Don't change the route, if route_choosen == 0
445 * (siblings does not include ourself)
447 if (route_choosen)
448 list_for_each_entry_safe(sibling, next_sibling,
449 &match->rt6i_siblings, rt6i_siblings) {
450 route_choosen--;
451 if (route_choosen == 0) {
452 if (rt6_score_route(sibling, oif, strict) < 0)
453 break;
454 match = sibling;
455 break;
458 return match;
/*
 *	Route lookup. Any table->tb6_lock is implied.
 */
/* Walk the leaf chain starting at @rt and pick the entry matching the
 * requested output interface (or source address when no oif is given).
 * Loopback routes are remembered as a fallback; with RT6_LOOKUP_F_IFACE
 * and no match the null entry is returned.
 */
465 static inline struct rt6_info *rt6_device_match(struct net *net,
466 struct rt6_info *rt,
467 const struct in6_addr *saddr,
468 int oif,
469 int flags)
471 struct rt6_info *local = NULL;
472 struct rt6_info *sprt;
474 if (!oif && ipv6_addr_any(saddr))
475 goto out;
477 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
478 struct net_device *dev = sprt->dst.dev;
480 if (oif) {
481 if (dev->ifindex == oif)
482 return sprt;
483 if (dev->flags & IFF_LOOPBACK) {
484 if (!sprt->rt6i_idev ||
485 sprt->rt6i_idev->dev->ifindex != oif) {
486 if (flags & RT6_LOOKUP_F_IFACE)
487 continue;
488 if (local &&
489 local->rt6i_idev->dev->ifindex == oif)
490 continue;
492 local = sprt;
494 } else {
495 if (ipv6_chk_addr(net, saddr, dev,
496 flags & RT6_LOOKUP_F_IFACE))
497 return sprt;
501 if (oif) {
502 if (local)
503 return local;
505 if (flags & RT6_LOOKUP_F_IFACE)
506 return net->ipv6.ip6_null_entry;
508 out:
509 return rt;
512 #ifdef CONFIG_IPV6_ROUTER_PREF
/* Deferred-probe state: target/device for a neighbour solicitation sent
 * from workqueue context (rt6_probe runs under RCU and cannot sleep).
 */
513 struct __rt6_probe_work {
514 struct work_struct work;
515 struct in6_addr target;
516 struct net_device *dev;
/* Workqueue handler: send the NS probe and drop the refs taken in rt6_probe(). */
519 static void rt6_probe_deferred(struct work_struct *w)
521 struct in6_addr mcaddr;
522 struct __rt6_probe_work *work =
523 container_of(w, struct __rt6_probe_work, work);
525 addrconf_addr_solict_mult(&work->target, &mcaddr);
526 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL);
527 dev_put(work->dev);
528 kfree(work);
531 static void rt6_probe(struct rt6_info *rt)
533 struct __rt6_probe_work *work;
534 struct neighbour *neigh;
536 * Okay, this does not seem to be appropriate
537 * for now, however, we need to check if it
538 * is really so; aka Router Reachability Probing.
540 * Router Reachability Probe MUST be rate-limited
541 * to no more than one per minute.
543 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
544 return;
545 rcu_read_lock_bh();
546 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
547 if (neigh) {
548 if (neigh->nud_state & NUD_VALID)
549 goto out;
551 work = NULL;
552 write_lock(&neigh->lock);
553 if (!(neigh->nud_state & NUD_VALID) &&
554 time_after(jiffies,
555 neigh->updated +
556 rt->rt6i_idev->cnf.rtr_probe_interval)) {
557 work = kmalloc(sizeof(*work), GFP_ATOMIC);
558 if (work)
559 __neigh_set_probe_once(neigh);
561 write_unlock(&neigh->lock);
562 } else {
563 work = kmalloc(sizeof(*work), GFP_ATOMIC);
566 if (work) {
567 INIT_WORK(&work->work, rt6_probe_deferred);
568 work->target = rt->rt6i_gateway;
569 dev_hold(rt->dst.dev);
570 work->dev = rt->dst.dev;
571 schedule_work(&work->work);
574 out:
575 rcu_read_unlock_bh();
577 #else
578 static inline void rt6_probe(struct rt6_info *rt)
581 #endif
584 * Default Router Selection (RFC 2461 6.3.6)
586 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
588 struct net_device *dev = rt->dst.dev;
589 if (!oif || dev->ifindex == oif)
590 return 2;
591 if ((dev->flags & IFF_LOOPBACK) &&
592 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
593 return 1;
594 return 0;
/* Classify the gateway neighbour's reachability for route scoring;
 * routes without a gateway always succeed.  Runs under RCU-bh.
 */
597 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
599 struct neighbour *neigh;
600 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
602 if (rt->rt6i_flags & RTF_NONEXTHOP ||
603 !(rt->rt6i_flags & RTF_GATEWAY))
604 return RT6_NUD_SUCCEED;
606 rcu_read_lock_bh();
607 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
608 if (neigh) {
609 read_lock(&neigh->lock);
610 if (neigh->nud_state & NUD_VALID)
611 ret = RT6_NUD_SUCCEED;
612 #ifdef CONFIG_IPV6_ROUTER_PREF
613 else if (!(neigh->nud_state & NUD_FAILED))
614 ret = RT6_NUD_SUCCEED;
615 else
616 ret = RT6_NUD_FAIL_PROBE;
617 #endif
618 read_unlock(&neigh->lock);
619 } else {
620 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
621 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
623 rcu_read_unlock_bh();
625 return ret;
628 static int rt6_score_route(struct rt6_info *rt, int oif,
629 int strict)
631 int m;
633 m = rt6_check_dev(rt, oif);
634 if (!m && (strict & RT6_LOOKUP_F_IFACE))
635 return RT6_NUD_FAIL_HARD;
636 #ifdef CONFIG_IPV6_ROUTER_PREF
637 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
638 #endif
639 if (strict & RT6_LOOKUP_F_REACHABLE) {
640 int n = rt6_check_neigh(rt);
641 if (n < 0)
642 return n;
644 return m;
/* Compare @rt against the best candidate so far (*mpri/@match); returns
 * the new best.  Link-down and expired routes are skipped; a DO_RR score
 * is treated as the lowest valid score and flagged via *do_rr.
 */
647 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
648 int *mpri, struct rt6_info *match,
649 bool *do_rr)
651 int m;
652 bool match_do_rr = false;
653 struct inet6_dev *idev = rt->rt6i_idev;
654 struct net_device *dev = rt->dst.dev;
656 if (dev && !netif_carrier_ok(dev) &&
657 idev->cnf.ignore_routes_with_linkdown)
658 goto out;
660 if (rt6_check_expired(rt))
661 goto out;
663 m = rt6_score_route(rt, oif, strict);
664 if (m == RT6_NUD_FAIL_DO_RR) {
665 match_do_rr = true;
666 m = 0; /* lowest valid score */
667 } else if (m == RT6_NUD_FAIL_HARD) {
668 goto out;
671 if (strict & RT6_LOOKUP_F_REACHABLE)
672 rt6_probe(rt);
674 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
675 if (m > *mpri) {
676 *do_rr = match_do_rr;
677 *mpri = m;
678 match = rt;
680 out:
681 return match;
/* Scan the routes of @fn that share @metric, starting the round-robin at
 * @rr_head and wrapping via fn->leaf; only if nothing matches does the
 * scan continue past the first different-metric entry (@cont).
 */
684 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
685 struct rt6_info *rr_head,
686 u32 metric, int oif, int strict,
687 bool *do_rr)
689 struct rt6_info *rt, *match, *cont;
690 int mpri = -1;
692 match = NULL;
693 cont = NULL;
694 for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
695 if (rt->rt6i_metric != metric) {
696 cont = rt;
697 break;
700 match = find_match(rt, oif, strict, &mpri, match, do_rr);
703 for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
704 if (rt->rt6i_metric != metric) {
705 cont = rt;
706 break;
709 match = find_match(rt, oif, strict, &mpri, match, do_rr);
712 if (match || !cont)
713 return match;
715 for (rt = cont; rt; rt = rt->dst.rt6_next)
716 match = find_match(rt, oif, strict, &mpri, match, do_rr);
718 return match;
/* Select the best route at @fn, advancing the node's round-robin pointer
 * when find_rr_leaf() asked for it; falls back to the null entry.
 */
721 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
723 struct rt6_info *match, *rt0;
724 struct net *net;
725 bool do_rr = false;
727 rt0 = fn->rr_ptr;
728 if (!rt0)
729 fn->rr_ptr = rt0 = fn->leaf;
731 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
732 &do_rr);
734 if (do_rr) {
735 struct rt6_info *next = rt0->dst.rt6_next;
737 /* no entries matched; do round-robin */
738 if (!next || next->rt6i_metric != rt0->rt6i_metric)
739 next = fn->leaf;
741 if (next != rt0)
742 fn->rr_ptr = next;
745 net = dev_net(rt0->dst.dev);
746 return match ? match : net->ipv6.ip6_null_entry;
749 static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
751 return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
754 #ifdef CONFIG_IPV6_ROUTE_INFO
/* Process a Route Information option from a Router Advertisement
 * (RFC 4191): validate it, then add/refresh/delete the corresponding
 * RTF_ROUTEINFO route according to its lifetime and preference.
 * Returns 0 on success or -EINVAL on a malformed option.
 */
755 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
756 const struct in6_addr *gwaddr)
758 struct net *net = dev_net(dev);
759 struct route_info *rinfo = (struct route_info *) opt;
760 struct in6_addr prefix_buf, *prefix;
761 unsigned int pref;
762 unsigned long lifetime;
763 struct rt6_info *rt;
765 if (len < sizeof(struct route_info)) {
766 return -EINVAL;
769 /* Sanity check for prefix_len and length */
770 if (rinfo->length > 3) {
771 return -EINVAL;
772 } else if (rinfo->prefix_len > 128) {
773 return -EINVAL;
774 } else if (rinfo->prefix_len > 64) {
775 if (rinfo->length < 2) {
776 return -EINVAL;
778 } else if (rinfo->prefix_len > 0) {
779 if (rinfo->length < 1) {
780 return -EINVAL;
784 pref = rinfo->route_pref;
785 if (pref == ICMPV6_ROUTER_PREF_INVALID)
786 return -EINVAL;
788 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
790 if (rinfo->length == 3)
791 prefix = (struct in6_addr *)rinfo->prefix;
792 else {
793 /* this function is safe */
794 ipv6_addr_prefix(&prefix_buf,
795 (struct in6_addr *)rinfo->prefix,
796 rinfo->prefix_len);
797 prefix = &prefix_buf;
800 if (rinfo->prefix_len == 0)
801 rt = rt6_get_dflt_router(gwaddr, dev);
802 else
803 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
804 gwaddr, dev->ifindex);
806 if (rt && !lifetime) {
807 ip6_del_rt(rt);
808 rt = NULL;
811 if (!rt && lifetime)
812 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
813 pref);
814 else if (rt)
815 rt->rt6i_flags = RTF_ROUTEINFO |
816 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
818 if (rt) {
819 if (!addrconf_finite_timeout(lifetime))
820 rt6_clean_expires(rt);
821 else
822 rt6_set_expires(rt, jiffies + HZ * lifetime);
824 ip6_rt_put(rt);
826 return 0;
828 #endif
830 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
831 struct in6_addr *saddr)
833 struct fib6_node *pn;
834 while (1) {
835 if (fn->fn_flags & RTN_TL_ROOT)
836 return NULL;
837 pn = fn->parent;
838 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
839 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
840 else
841 fn = pn;
842 if (fn->fn_flags & RTN_RTINFO)
843 return fn;
/* Table lookup without clone creation: match device/multipath under the
 * table read lock, backtracking toward the root on a null-entry result.
 * The returned dst's use count is bumped via dst_use().
 */
847 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
848 struct fib6_table *table,
849 struct flowi6 *fl6, int flags)
851 struct fib6_node *fn;
852 struct rt6_info *rt;
854 read_lock_bh(&table->tb6_lock);
855 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
856 restart:
857 rt = fn->leaf;
858 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
859 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
860 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
861 if (rt == net->ipv6.ip6_null_entry) {
862 fn = fib6_backtrack(fn, &fl6->saddr);
863 if (fn)
864 goto restart;
866 dst_use(&rt->dst, jiffies);
867 read_unlock_bh(&table->tb6_lock);
868 return rt;
872 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
873 int flags)
875 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
877 EXPORT_SYMBOL_GPL(ip6_route_lookup);
879 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
880 const struct in6_addr *saddr, int oif, int strict)
882 struct flowi6 fl6 = {
883 .flowi6_oif = oif,
884 .daddr = *daddr,
886 struct dst_entry *dst;
887 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
889 if (saddr) {
890 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
891 flags |= RT6_LOOKUP_F_HAS_SADDR;
894 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
895 if (dst->error == 0)
896 return (struct rt6_info *) dst;
898 dst_release(dst);
900 return NULL;
902 EXPORT_SYMBOL(rt6_lookup);
904 /* ip6_ins_rt is called with FREE table->tb6_lock.
905 It takes new route entry, the addition fails by any reason the
906 route is freed. In any case, if caller does not hold it, it may
907 be destroyed.
910 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
911 struct mx6_config *mxc)
913 int err;
914 struct fib6_table *table;
916 table = rt->rt6i_table;
917 write_lock_bh(&table->tb6_lock);
918 err = fib6_add(&table->tb6_root, rt, info, mxc);
919 write_unlock_bh(&table->tb6_lock);
921 return err;
924 int ip6_ins_rt(struct rt6_info *rt)
926 struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
927 struct mx6_config mxc = { .mx = NULL, };
929 return __ip6_ins_rt(rt, &info, &mxc);
/* Create an RTF_CACHE host-route clone of @ort for @daddr (and @saddr
 * with subtrees); clones of clones are redirected to the tree parent
 * via dst.from first.
 */
932 static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
933 const struct in6_addr *daddr,
934 const struct in6_addr *saddr)
936 struct rt6_info *rt;
939 * Clone the route.
942 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
943 ort = (struct rt6_info *)ort->dst.from;
945 rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
947 if (!rt)
948 return NULL;
950 ip6_rt_copy_init(rt, ort);
951 rt->rt6i_flags |= RTF_CACHE;
952 rt->rt6i_metric = 0;
953 rt->dst.flags |= DST_HOST;
954 rt->rt6i_dst.addr = *daddr;
955 rt->rt6i_dst.plen = 128;
957 if (!rt6_is_gw_or_nonexthop(ort)) {
958 if (ort->rt6i_dst.plen != 128 &&
959 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
960 rt->rt6i_flags |= RTF_ANYCAST;
961 #ifdef CONFIG_IPV6_SUBTREES
962 if (rt->rt6i_src.plen && saddr) {
963 rt->rt6i_src.addr = *saddr;
964 rt->rt6i_src.plen = 128;
966 #endif
969 return rt;
972 static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
974 struct rt6_info *pcpu_rt;
976 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
977 rt->dst.dev, rt->dst.flags);
979 if (!pcpu_rt)
980 return NULL;
981 ip6_rt_copy_init(pcpu_rt, rt);
982 pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
983 pcpu_rt->rt6i_flags |= RTF_PCPU;
984 return pcpu_rt;
987 /* It should be called with read_lock_bh(&tb6_lock) acquired */
988 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
990 struct rt6_info *pcpu_rt, **p;
992 p = this_cpu_ptr(rt->rt6i_pcpu);
993 pcpu_rt = *p;
995 if (pcpu_rt) {
996 dst_hold(&pcpu_rt->dst);
997 rt6_dst_from_metrics_check(pcpu_rt);
999 return pcpu_rt;
/* Allocate and install this CPU's clone of @rt; cmpxchg under the table
 * read lock resolves races with other CPUs and with tree removal.
 * Returns a held dst: the new clone, a racing winner's clone, @rt itself
 * if it left the tree, or the null entry on OOM.
 */
1002 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1004 struct fib6_table *table = rt->rt6i_table;
1005 struct rt6_info *pcpu_rt, *prev, **p;
1007 pcpu_rt = ip6_rt_pcpu_alloc(rt);
1008 if (!pcpu_rt) {
1009 struct net *net = dev_net(rt->dst.dev);
1011 dst_hold(&net->ipv6.ip6_null_entry->dst);
1012 return net->ipv6.ip6_null_entry;
1015 read_lock_bh(&table->tb6_lock);
1016 if (rt->rt6i_pcpu) {
1017 p = this_cpu_ptr(rt->rt6i_pcpu);
1018 prev = cmpxchg(p, NULL, pcpu_rt);
1019 if (prev) {
1020 /* If someone did it before us, return prev instead */
1021 dst_destroy(&pcpu_rt->dst);
1022 pcpu_rt = prev;
1024 } else {
1025 /* rt has been removed from the fib6 tree
1026 * before we have a chance to acquire the read_lock.
1027 * In this case, don't brother to create a pcpu rt
1028 * since rt is going away anyway. The next
1029 * dst_check() will trigger a re-lookup.
1031 dst_destroy(&pcpu_rt->dst);
1032 pcpu_rt = rt;
1034 dst_hold(&pcpu_rt->dst);
1035 rt6_dst_from_metrics_check(pcpu_rt);
1036 read_unlock_bh(&table->tb6_lock);
1037 return pcpu_rt;
/* Core policy-route lookup: select the best fib6 entry (retrying without
 * the reachability requirement if needed), then hand back either the
 * entry itself, an uncached RTF_CACHE clone (KNOWN_NH case), or a per-cpu
 * clone.  Always returns a held dst.
 */
1040 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
1041 struct flowi6 *fl6, int flags)
1043 struct fib6_node *fn, *saved_fn;
1044 struct rt6_info *rt;
1045 int strict = 0;
1047 strict |= flags & RT6_LOOKUP_F_IFACE;
1048 if (net->ipv6.devconf_all->forwarding == 0)
1049 strict |= RT6_LOOKUP_F_REACHABLE;
1051 read_lock_bh(&table->tb6_lock);
1053 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1054 saved_fn = fn;
1056 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1057 oif = 0;
1059 redo_rt6_select:
1060 rt = rt6_select(fn, oif, strict);
1061 if (rt->rt6i_nsiblings)
1062 rt = rt6_multipath_select(rt, fl6, oif, strict);
1063 if (rt == net->ipv6.ip6_null_entry) {
1064 fn = fib6_backtrack(fn, &fl6->saddr);
1065 if (fn)
1066 goto redo_rt6_select;
1067 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1068 /* also consider unreachable route */
1069 strict &= ~RT6_LOOKUP_F_REACHABLE;
1070 fn = saved_fn;
1071 goto redo_rt6_select;
1076 if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
1077 dst_use(&rt->dst, jiffies);
1078 read_unlock_bh(&table->tb6_lock);
1080 rt6_dst_from_metrics_check(rt);
1081 return rt;
1082 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1083 !(rt->rt6i_flags & RTF_GATEWAY))) {
1084 /* Create a RTF_CACHE clone which will not be
1085 * owned by the fib6 tree. It is for the special case where
1086 * the daddr in the skb during the neighbor look-up is different
1087 * from the fl6->daddr used to look-up route here.
1090 struct rt6_info *uncached_rt;
1092 dst_use(&rt->dst, jiffies);
1093 read_unlock_bh(&table->tb6_lock);
1095 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
1096 dst_release(&rt->dst);
1098 if (uncached_rt)
1099 rt6_uncached_list_add(uncached_rt);
1100 else
1101 uncached_rt = net->ipv6.ip6_null_entry;
1103 dst_hold(&uncached_rt->dst);
1104 return uncached_rt;
1106 } else {
1107 /* Get a percpu copy */
1109 struct rt6_info *pcpu_rt;
1111 rt->dst.lastuse = jiffies;
1112 rt->dst.__use++;
1113 pcpu_rt = rt6_get_pcpu_route(rt);
1115 if (pcpu_rt) {
1116 read_unlock_bh(&table->tb6_lock);
1117 } else {
1118 /* We have to do the read_unlock first
1119 * because rt6_make_pcpu_route() may trigger
1120 * ip6_dst_gc() which will take the write_lock.
1122 dst_hold(&rt->dst);
1123 read_unlock_bh(&table->tb6_lock);
1124 pcpu_rt = rt6_make_pcpu_route(rt);
1125 dst_release(&rt->dst);
1128 return pcpu_rt;
1133 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1134 struct flowi6 *fl6, int flags)
1136 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1139 static struct dst_entry *ip6_route_input_lookup(struct net *net,
1140 struct net_device *dev,
1141 struct flowi6 *fl6, int flags)
1143 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1144 flags |= RT6_LOOKUP_F_IFACE;
1146 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
/* Attach a route to an incoming skb: build the flow from the IPv6 header
 * (honouring collected tunnel metadata) and set the skb's dst.
 */
1149 void ip6_route_input(struct sk_buff *skb)
1151 const struct ipv6hdr *iph = ipv6_hdr(skb);
1152 struct net *net = dev_net(skb->dev);
1153 int flags = RT6_LOOKUP_F_HAS_SADDR;
1154 struct ip_tunnel_info *tun_info;
1155 struct flowi6 fl6 = {
1156 .flowi6_iif = l3mdev_fib_oif(skb->dev),
1157 .daddr = iph->daddr,
1158 .saddr = iph->saddr,
1159 .flowlabel = ip6_flowinfo(iph),
1160 .flowi6_mark = skb->mark,
1161 .flowi6_proto = iph->nexthdr,
1164 tun_info = skb_tunnel_info(skb);
1165 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1166 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
1167 skb_dst_drop(skb);
1168 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1171 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1172 struct flowi6 *fl6, int flags)
1174 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
/* Output route lookup: let an L3 master device take over first, then
 * derive lookup flags from socket binding, destination scope and source
 * address presence before consulting the policy rules.
 */
1177 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
1178 struct flowi6 *fl6, int flags)
1180 struct dst_entry *dst;
1181 bool any_src;
1183 dst = l3mdev_rt6_dst_by_oif(net, fl6);
1184 if (dst)
1185 return dst;
1187 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1189 any_src = ipv6_addr_any(&fl6->saddr);
1190 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1191 (fl6->flowi6_oif && any_src))
1192 flags |= RT6_LOOKUP_F_IFACE;
1194 if (!any_src)
1195 flags |= RT6_LOOKUP_F_HAS_SADDR;
1196 else if (sk)
1197 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1199 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1201 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
/* Clone @dst_orig into a blackhole dst (discard input/output) that keeps
 * the original's metrics and addressing; used e.g. by xfrm when a route
 * must stop carrying traffic.  Releases @dst_orig; ERR_PTR(-ENOMEM) on OOM.
 */
1203 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1205 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1206 struct dst_entry *new = NULL;
1208 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1209 if (rt) {
1210 rt6_info_init(rt);
1212 new = &rt->dst;
1213 new->__use = 1;
1214 new->input = dst_discard;
1215 new->output = dst_discard_out;
1217 dst_copy_metrics(new, &ort->dst);
1218 rt->rt6i_idev = ort->rt6i_idev;
1219 if (rt->rt6i_idev)
1220 in6_dev_hold(rt->rt6i_idev);
1222 rt->rt6i_gateway = ort->rt6i_gateway;
1223 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
1224 rt->rt6i_metric = 0;
1226 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1227 #ifdef CONFIG_IPV6_SUBTREES
1228 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1229 #endif
1231 dst_free(new);
1234 dst_release(dst_orig);
1235 return new ? new : ERR_PTR(-ENOMEM);
1239 * Destination cache support functions
1242 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
1244 if (rt->dst.from &&
1245 dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
1246 dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
1249 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1251 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1252 return NULL;
1254 if (rt6_check_expired(rt))
1255 return NULL;
1257 return &rt->dst;
/* Validation path for rts that were cloned from a parent (dst.from set):
 * the clone itself must not be expired, must still be in the
 * DST_OBSOLETE_FORCE_CHK state, and the *parent* must pass rt6_check()
 * against @cookie.  Otherwise the cached entry is stale.
 */
1260 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1262 if (!__rt6_check_expired(rt) &&
1263 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1264 rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1265 return &rt->dst;
1266 else
1267 return NULL;
/* dst_ops->check hook: decide whether a socket's cached dst is still
 * valid.  Per-cpu copies and uncached clones are validated through their
 * parent (rt6_dst_from_check); plain fib entries through rt6_check.
 */
1270 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1272 struct rt6_info *rt;
1274 rt = (struct rt6_info *) dst;
1276 /* All IPV6 dsts are created with ->obsolete set to the value
1277 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1278 * into this function always.
/* keep metrics in sync with the parent before validating */
1281 rt6_dst_from_metrics_check(rt);
1283 if (rt->rt6i_flags & RTF_PCPU ||
1284 (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
1285 return rt6_dst_from_check(rt, cookie);
1286 else
1287 return rt6_check(rt, cookie);
/* dst_ops->negative_advice hook: the stack suspects this dst is bad.
 * An expired RTF_CACHE entry is deleted from the tree; any other entry
 * just has the caller's reference dropped.  Returns NULL when the caller
 * should stop using the dst.
 */
1290 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1292 struct rt6_info *rt = (struct rt6_info *) dst;
1294 if (rt) {
1295 if (rt->rt6i_flags & RTF_CACHE) {
1296 if (rt6_check_expired(rt)) {
/* ip6_del_rt() consumes the reference */
1297 ip6_del_rt(rt);
1298 dst = NULL;
1300 } else {
1301 dst_release(dst);
1302 dst = NULL;
1305 return dst;
/* dst_ops->link_failure hook: tell the sender the address is unreachable,
 * then invalidate the route — delete it if it is a cached clone, or bump
 * the fib node's serial (set to -1) so cached default-route users fail
 * their next rt6_check().
 */
1308 static void ip6_link_failure(struct sk_buff *skb)
1310 struct rt6_info *rt;
1312 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1314 rt = (struct rt6_info *) skb_dst(skb);
1315 if (rt) {
1316 if (rt->rt6i_flags & RTF_CACHE) {
/* take a ref for ip6_del_rt(), which consumes one */
1317 dst_hold(&rt->dst);
1318 ip6_del_rt(rt);
1319 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1320 rt->rt6i_node->fn_sernum = -1;
/* Record a learned path MTU on @rt and (re)arm its expiry using the
 * per-netns ip6_rt_mtu_expires sysctl.  RTF_MODIFIED marks the route as
 * changed by ICMP feedback rather than configuration.
 */
1325 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
1327 struct net *net = dev_net(rt->dst.dev);
1329 rt->rt6i_flags |= RTF_MODIFIED;
1330 rt->rt6i_pmtu = mtu;
1331 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
/* True when a PMTU update should be stored in a *new* RTF_CACHE clone:
 * @rt is not itself a cache entry, and is either a per-cpu copy or still
 * linked into the fib tree.
 */
1334 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
1336 return !(rt->rt6i_flags & RTF_CACHE) &&
1337 (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
/* Core PMTU-update logic.  Ignores local routes and MTUs that do not
 * shrink the path.  Updates @dst in place when permitted; otherwise
 * allocates an RTF_CACHE clone keyed by the flow's addresses (from @iph
 * or @sk) and inserts it so subsequent lookups pick up the new MTU.
 */
1340 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1341 const struct ipv6hdr *iph, u32 mtu)
1343 struct rt6_info *rt6 = (struct rt6_info *)dst;
1345 if (rt6->rt6i_flags & RTF_LOCAL)
1346 return;
/* a PMTU report implies the neighbour is reachable */
1348 dst_confirm(dst);
/* clamp to the IPv6 minimum MTU of 1280 */
1349 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1350 if (mtu >= dst_mtu(dst))
1351 return;
1353 if (!rt6_cache_allowed_for_pmtu(rt6)) {
1354 rt6_do_update_pmtu(rt6, mtu);
1355 } else {
1356 const struct in6_addr *daddr, *saddr;
1357 struct rt6_info *nrt6;
1359 if (iph) {
1360 daddr = &iph->daddr;
1361 saddr = &iph->saddr;
1362 } else if (sk) {
1363 daddr = &sk->sk_v6_daddr;
1364 saddr = &inet6_sk(sk)->saddr;
1365 } else {
/* no addresses to key a cache entry on */
1366 return;
1368 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
1369 if (nrt6) {
1370 rt6_do_update_pmtu(nrt6, mtu);
1372 /* ip6_ins_rt(nrt6) will bump the
1373 * rt6->rt6i_node->fn_sernum
1374 * which will fail the next rt6_check() and
1375 * invalidate the sk->sk_dst_cache.
1377 ip6_ins_rt(nrt6);
/* dst_ops->update_pmtu hook: thin wrapper that extracts the IPv6 header
 * from @skb (if any) and defers to __ip6_rt_update_pmtu().
 */
1382 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1383 struct sk_buff *skb, u32 mtu)
1385 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
/* Apply a PMTU update for the flow described by the (already pulled)
 * IPv6 header at skb->data: build a flowi6 from it, look up the route,
 * and feed the new MTU into __ip6_rt_update_pmtu().  @mtu is
 * network-byte-order as carried in the ICMP message.
 */
1388 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1389 int oif, u32 mark)
1391 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1392 struct dst_entry *dst;
1393 struct flowi6 fl6;
1395 memset(&fl6, 0, sizeof(fl6));
1396 fl6.flowi6_oif = oif;
/* fall back to the reply mark when the caller passed mark == 0 */
1397 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1398 fl6.daddr = iph->daddr;
1399 fl6.saddr = iph->saddr;
1400 fl6.flowlabel = ip6_flowinfo(iph);
1402 dst = ip6_route_output(net, NULL, &fl6);
1403 if (!dst->error)
1404 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
1405 dst_release(dst);
/* Socket-scoped convenience wrapper: PMTU update using the socket's
 * netns, bound device and mark.
 */
1409 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1411 ip6_update_pmtu(skb, sock_net(sk), mtu,
1412 sk->sk_bound_dev_if, sk->sk_mark);
1416 /* Handle redirects */
/* flowi6 extended with the redirecting router's address, so the lookup
 * callback (__ip6_route_redirect) can match routes by gateway.  The fl6
 * member must stay first: the callback downcasts from flowi6.
 */
1417 struct ip6rd_flowi {
1418 struct flowi6 fl6;
1419 struct in6_addr gateway;
/* Table-lookup callback used when validating an ICMPv6 redirect: find the
 * route currently used for the destination and accept it only if its
 * gateway matches the router that sent the redirect (rdfl->gateway) on
 * the expected interface.  Returns a held rt; ip6_null_entry when no
 * acceptable route exists.
 */
1422 static struct rt6_info *__ip6_route_redirect(struct net *net,
1423 struct fib6_table *table,
1424 struct flowi6 *fl6,
1425 int flags)
1427 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1428 struct rt6_info *rt;
1429 struct fib6_node *fn;
1431 /* Get the "current" route for this destination and
1432 * check if the redirect has come from approriate router.
1434 * RFC 4861 specifies that redirects should only be
1435 * accepted if they come from the nexthop to the target.
1436 * Due to the way the routes are chosen, this notion
1437 * is a bit fuzzy and one might need to check all possible
1438 * routes.
1441 read_lock_bh(&table->tb6_lock);
1442 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1443 restart:
1444 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1445 if (rt6_check_expired(rt))
1446 continue;
1447 if (rt->dst.error)
1448 break;
1449 if (!(rt->rt6i_flags & RTF_GATEWAY))
1450 continue;
1451 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1452 continue;
/* redirect must come from our current nexthop router */
1453 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1454 continue;
1455 break;
1458 if (!rt)
1459 rt = net->ipv6.ip6_null_entry;
1460 else if (rt->dst.error) {
1461 rt = net->ipv6.ip6_null_entry;
1462 goto out;
/* nothing matched in this node: walk back up the tree and retry */
1465 if (rt == net->ipv6.ip6_null_entry) {
1466 fn = fib6_backtrack(fn, &fl6->saddr);
1467 if (fn)
1468 goto restart;
1471 out:
1472 dst_hold(&rt->dst);
1474 read_unlock_bh(&table->tb6_lock);
1476 return rt;
/* Wrap @fl6 and the redirecting @gateway into an ip6rd_flowi and run the
 * policy-aware lookup with __ip6_route_redirect as the table callback.
 */
1479 static struct dst_entry *ip6_route_redirect(struct net *net,
1480 const struct flowi6 *fl6,
1481 const struct in6_addr *gateway)
1483 int flags = RT6_LOOKUP_F_HAS_SADDR;
1484 struct ip6rd_flowi rdfl;
1486 rdfl.fl6 = *fl6;
1487 rdfl.gateway = *gateway;
1489 return fib6_rule_lookup(net, &rdfl.fl6,
1490 flags, __ip6_route_redirect);
/* Process an ICMPv6 redirect for the flow in the (already pulled) IPv6
 * header at skb->data; the redirecting router is the packet's source.
 * Looks up the affected route and applies rt6_do_redirect().
 */
1493 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1495 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1496 struct dst_entry *dst;
1497 struct flowi6 fl6;
1499 memset(&fl6, 0, sizeof(fl6));
1500 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1501 fl6.flowi6_oif = oif;
1502 fl6.flowi6_mark = mark;
1503 fl6.daddr = iph->daddr;
1504 fl6.saddr = iph->saddr;
1505 fl6.flowlabel = ip6_flowinfo(iph);
1507 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1508 rt6_do_redirect(dst, NULL, skb);
1509 dst_release(dst);
/* Variant of ip6_redirect() for the case where the offending packet has
 * no embedded transport header: flow addresses are taken from the
 * redirect message itself (msg->dest) and the outer IPv6 header.
 */
1513 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1514 u32 mark)
1516 const struct ipv6hdr *iph = ipv6_hdr(skb);
1517 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1518 struct dst_entry *dst;
1519 struct flowi6 fl6;
1521 memset(&fl6, 0, sizeof(fl6));
1522 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1523 fl6.flowi6_oif = oif;
1524 fl6.flowi6_mark = mark;
1525 fl6.daddr = msg->dest;
/* the redirect was sent to us, so our address is the outer daddr */
1526 fl6.saddr = iph->daddr;
1528 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1529 rt6_do_redirect(dst, NULL, skb);
1530 dst_release(dst);
/* Socket-scoped convenience wrapper around ip6_redirect(). */
1533 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1535 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
/* dst_ops->default_advmss hook: derive the advertised TCP MSS from the
 * dst MTU minus IPv6+TCP headers, clamped below by the
 * ip6_rt_min_advmss sysctl and above by the jumbogram sentinel.
 */
1539 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1541 struct net_device *dev = dst->dev;
1542 unsigned int mtu = dst_mtu(dst);
1543 struct net *net = dev_net(dev);
1545 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1547 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1548 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1551 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1552 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1553 * IPV6_MAXPLEN is also valid and means: "any MSS,
1554 * rely only on pmtu discovery"
1556 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1557 mtu = IPV6_MAXPLEN;
1558 return mtu;
1561 static unsigned int ip6_mtu(const struct dst_entry *dst)
1563 const struct rt6_info *rt = (const struct rt6_info *)dst;
1564 unsigned int mtu = rt->rt6i_pmtu;
1565 struct inet6_dev *idev;
1567 if (mtu)
1568 goto out;
1570 mtu = dst_metric_raw(dst, RTAX_MTU);
1571 if (mtu)
1572 goto out;
1574 mtu = IPV6_MIN_MTU;
1576 rcu_read_lock();
1577 idev = __in6_dev_get(dst->dev);
1578 if (idev)
1579 mtu = idev->cnf.mtu6;
1580 rcu_read_unlock();
1582 out:
1583 return min_t(unsigned int, mtu, IP6_MAX_MTU);
/* Singly linked list of dsts handed out by icmp6_dst_alloc(), reaped by
 * icmp6_dst_gc(); protected by icmp6_dst_lock.
 */
1586 static struct dst_entry *icmp6_dst_gc_list;
1587 static DEFINE_SPINLOCK(icmp6_dst_lock);
/* Allocate a throwaway host route for sending an ICMPv6 message on @dev
 * toward fl6->daddr.  The route is chained onto icmp6_dst_gc_list (not
 * the fib) and reclaimed later by icmp6_dst_gc().  Returns the dst after
 * an xfrm_lookup(), or an ERR_PTR on failure.
 */
1589 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1590 struct flowi6 *fl6)
1592 struct dst_entry *dst;
1593 struct rt6_info *rt;
1594 struct inet6_dev *idev = in6_dev_get(dev);
1595 struct net *net = dev_net(dev);
1597 if (unlikely(!idev))
1598 return ERR_PTR(-ENODEV);
1600 rt = ip6_dst_alloc(net, dev, 0);
1601 if (unlikely(!rt)) {
1602 in6_dev_put(idev);
1603 dst = ERR_PTR(-ENOMEM);
1604 goto out;
1607 rt->dst.flags |= DST_HOST;
1608 rt->dst.output = ip6_output;
1609 atomic_set(&rt->dst.__refcnt, 1);
1610 rt->rt6i_gateway = fl6->daddr;
1611 rt->rt6i_dst.addr = fl6->daddr;
1612 rt->rt6i_dst.plen = 128;
1613 rt->rt6i_idev = idev;
1614 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
/* push onto the GC list instead of inserting into the fib */
1616 spin_lock_bh(&icmp6_dst_lock);
1617 rt->dst.next = icmp6_dst_gc_list;
1618 icmp6_dst_gc_list = &rt->dst;
1619 spin_unlock_bh(&icmp6_dst_lock);
1621 fib6_force_start_gc(net);
1623 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1625 out:
1626 return dst;
1629 int icmp6_dst_gc(void)
1631 struct dst_entry *dst, **pprev;
1632 int more = 0;
1634 spin_lock_bh(&icmp6_dst_lock);
1635 pprev = &icmp6_dst_gc_list;
1637 while ((dst = *pprev) != NULL) {
1638 if (!atomic_read(&dst->__refcnt)) {
1639 *pprev = dst->next;
1640 dst_free(dst);
1641 } else {
1642 pprev = &dst->next;
1643 ++more;
1647 spin_unlock_bh(&icmp6_dst_lock);
1649 return more;
/* Apply @func to every entry on icmp6_dst_gc_list; entries for which it
 * returns non-zero are unlinked and freed.  Used for device-teardown
 * style sweeps of the ICMP dst list.
 */
1652 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1653 void *arg)
1655 struct dst_entry *dst, **pprev;
1657 spin_lock_bh(&icmp6_dst_lock);
1658 pprev = &icmp6_dst_gc_list;
1659 while ((dst = *pprev) != NULL) {
1660 struct rt6_info *rt = (struct rt6_info *) dst;
1661 if (func(rt, arg)) {
1662 *pprev = dst->next;
1663 dst_free(dst);
1664 } else {
1665 pprev = &dst->next;
1668 spin_unlock_bh(&icmp6_dst_lock);
/* dst_ops->gc hook: decide whether to run the fib6 garbage collector and
 * whether new dst allocation should be refused (returns non-zero when
 * over rt_max_size).  ip6_rt_gc_expire grows each time GC is needed and
 * decays by 1/2^elasticity, making expiry progressively more aggressive
 * under pressure.
 */
1671 static int ip6_dst_gc(struct dst_ops *ops)
1673 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1674 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1675 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1676 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1677 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1678 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1679 int entries;
1681 entries = dst_entries_get_fast(ops);
/* skip GC if we ran recently and are under the size cap */
1682 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1683 entries <= rt_max_size)
1684 goto out;
1686 net->ipv6.ip6_rt_gc_expire++;
1687 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1688 entries = dst_entries_get_slow(ops);
1689 if (entries < ops->gc_thresh)
1690 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1691 out:
1692 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1693 return entries > rt_max_size;
/* Translate the RTA_METRICS netlink nest in @cfg into the mx array/bitmap
 * of @mxc.  Validates metric types, resolves RTAX_CC_ALGO names to keys,
 * clamps RTAX_HOPLIMIT to 255, and rejects unknown RTAX_FEATURES bits.
 * Returns 0 (with mxc->mx allocated — caller frees) or a negative errno.
 */
1696 static int ip6_convert_metrics(struct mx6_config *mxc,
1697 const struct fib6_config *cfg)
1699 bool ecn_ca = false;
1700 struct nlattr *nla;
1701 int remaining;
1702 u32 *mp;
1704 if (!cfg->fc_mx)
1705 return 0;
1707 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1708 if (unlikely(!mp))
1709 return -ENOMEM;
1711 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1712 int type = nla_type(nla);
1713 u32 val;
1715 if (!type)
1716 continue;
1717 if (unlikely(type > RTAX_MAX))
1718 goto err;
1720 if (type == RTAX_CC_ALGO) {
1721 char tmp[TCP_CA_NAME_MAX];
/* congestion-control metric arrives as a name string */
1723 nla_strlcpy(tmp, nla, sizeof(tmp));
1724 val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
1725 if (val == TCP_CA_UNSPEC)
1726 goto err;
1727 } else {
1728 val = nla_get_u32(nla);
1730 if (type == RTAX_HOPLIMIT && val > 255)
1731 val = 255;
1732 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
1733 goto err;
/* metrics array is 0-based; netlink types are 1-based */
1735 mp[type - 1] = val;
1736 __set_bit(type - 1, mxc->mx_valid);
1739 if (ecn_ca) {
1740 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
1741 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
1744 mxc->mx = mp;
1745 return 0;
1746 err:
1747 kfree(mp);
1748 return -EINVAL;
/* Build (but do not insert) a new rt6_info from a validated fib6_config.
 * Performs all policy checks: rejects kernel-internal flags from
 * userspace, validates prefix lengths, resolves the output device,
 * promotes loopback routes to reject routes, verifies gateways, and
 * applies lwtunnel encapsulation.  Returns the new route with device,
 * idev and table references held, or ERR_PTR on failure (all references
 * released on the error path).
 */
1751 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
1753 struct net *net = cfg->fc_nlinfo.nl_net;
1754 struct rt6_info *rt = NULL;
1755 struct net_device *dev = NULL;
1756 struct inet6_dev *idev = NULL;
1757 struct fib6_table *table;
1758 int addr_type;
1759 int err = -EINVAL;
1761 /* RTF_PCPU is an internal flag; can not be set by userspace */
1762 if (cfg->fc_flags & RTF_PCPU)
1763 goto out;
1765 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1766 goto out;
1767 #ifndef CONFIG_IPV6_SUBTREES
/* source-prefix routing requires subtree support */
1768 if (cfg->fc_src_len)
1769 goto out;
1770 #endif
1771 if (cfg->fc_ifindex) {
1772 err = -ENODEV;
1773 dev = dev_get_by_index(net, cfg->fc_ifindex);
1774 if (!dev)
1775 goto out;
1776 idev = in6_dev_get(dev);
1777 if (!idev)
1778 goto out;
1781 if (cfg->fc_metric == 0)
1782 cfg->fc_metric = IP6_RT_PRIO_USER;
1784 err = -ENOBUFS;
1785 if (cfg->fc_nlinfo.nlh &&
1786 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1787 table = fib6_get_table(net, cfg->fc_table);
1788 if (!table) {
/* tolerate missing NLM_F_CREATE for compatibility, but warn */
1789 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1790 table = fib6_new_table(net, cfg->fc_table);
1792 } else {
1793 table = fib6_new_table(net, cfg->fc_table);
1796 if (!table)
1797 goto out;
1799 rt = ip6_dst_alloc(net, NULL,
1800 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1802 if (!rt) {
1803 err = -ENOMEM;
1804 goto out;
1807 if (cfg->fc_flags & RTF_EXPIRES)
1808 rt6_set_expires(rt, jiffies +
1809 clock_t_to_jiffies(cfg->fc_expires));
1810 else
1811 rt6_clean_expires(rt);
1813 if (cfg->fc_protocol == RTPROT_UNSPEC)
1814 cfg->fc_protocol = RTPROT_BOOT;
1815 rt->rt6i_protocol = cfg->fc_protocol;
1817 addr_type = ipv6_addr_type(&cfg->fc_dst);
/* choose the input handler by destination class */
1819 if (addr_type & IPV6_ADDR_MULTICAST)
1820 rt->dst.input = ip6_mc_input;
1821 else if (cfg->fc_flags & RTF_LOCAL)
1822 rt->dst.input = ip6_input;
1823 else
1824 rt->dst.input = ip6_forward;
1826 rt->dst.output = ip6_output;
1828 if (cfg->fc_encap) {
1829 struct lwtunnel_state *lwtstate;
1831 err = lwtunnel_build_state(dev, cfg->fc_encap_type,
1832 cfg->fc_encap, AF_INET6, cfg,
1833 &lwtstate);
1834 if (err)
1835 goto out;
1836 rt->dst.lwtstate = lwtstate_get(lwtstate);
/* let the tunnel wrap output/input while preserving the originals */
1837 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
1838 rt->dst.lwtstate->orig_output = rt->dst.output;
1839 rt->dst.output = lwtunnel_output;
1841 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
1842 rt->dst.lwtstate->orig_input = rt->dst.input;
1843 rt->dst.input = lwtunnel_input;
1847 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1848 rt->rt6i_dst.plen = cfg->fc_dst_len;
1849 if (rt->rt6i_dst.plen == 128)
1850 rt->dst.flags |= DST_HOST;
1852 #ifdef CONFIG_IPV6_SUBTREES
1853 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1854 rt->rt6i_src.plen = cfg->fc_src_len;
1855 #endif
1857 rt->rt6i_metric = cfg->fc_metric;
1859 /* We cannot add true routes via loopback here,
1860 they would result in kernel looping; promote them to reject routes
1862 if ((cfg->fc_flags & RTF_REJECT) ||
1863 (dev && (dev->flags & IFF_LOOPBACK) &&
1864 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1865 !(cfg->fc_flags & RTF_LOCAL))) {
1866 /* hold loopback dev/idev if we haven't done so. */
1867 if (dev != net->loopback_dev) {
1868 if (dev) {
1869 dev_put(dev);
1870 in6_dev_put(idev);
1872 dev = net->loopback_dev;
1873 dev_hold(dev);
1874 idev = in6_dev_get(dev);
1875 if (!idev) {
1876 err = -ENODEV;
1877 goto out;
1880 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
/* pick the error code / handlers matching the reject type */
1881 switch (cfg->fc_type) {
1882 case RTN_BLACKHOLE:
1883 rt->dst.error = -EINVAL;
1884 rt->dst.output = dst_discard_out;
1885 rt->dst.input = dst_discard;
1886 break;
1887 case RTN_PROHIBIT:
1888 rt->dst.error = -EACCES;
1889 rt->dst.output = ip6_pkt_prohibit_out;
1890 rt->dst.input = ip6_pkt_prohibit;
1891 break;
1892 case RTN_THROW:
1893 case RTN_UNREACHABLE:
1894 default:
1895 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1896 : (cfg->fc_type == RTN_UNREACHABLE)
1897 ? -EHOSTUNREACH : -ENETUNREACH;
1898 rt->dst.output = ip6_pkt_discard_out;
1899 rt->dst.input = ip6_pkt_discard;
1900 break;
1902 goto install_route;
1905 if (cfg->fc_flags & RTF_GATEWAY) {
1906 const struct in6_addr *gw_addr;
1907 int gwa_type;
1909 gw_addr = &cfg->fc_gateway;
1910 gwa_type = ipv6_addr_type(gw_addr);
1912 /* if gw_addr is local we will fail to detect this in case
1913 * address is still TENTATIVE (DAD in progress). rt6_lookup()
1914 * will return already-added prefix route via interface that
1915 * prefix route was assigned to, which might be non-loopback.
1917 err = -EINVAL;
1918 if (ipv6_chk_addr_and_flags(net, gw_addr,
1919 gwa_type & IPV6_ADDR_LINKLOCAL ?
1920 dev : NULL, 0, 0))
1921 goto out;
1923 rt->rt6i_gateway = *gw_addr;
1925 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1926 struct rt6_info *grt;
1928 /* IPv6 strictly inhibits using not link-local
1929 addresses as nexthop address.
1930 Otherwise, router will not able to send redirects.
1931 It is very good, but in some (rare!) circumstances
1932 (SIT, PtP, NBMA NOARP links) it is handy to allow
1933 some exceptions. --ANK
1935 if (!(gwa_type & IPV6_ADDR_UNICAST))
1936 goto out;
/* the gateway itself must be reachable via a non-gateway route */
1938 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1940 err = -EHOSTUNREACH;
1941 if (!grt)
1942 goto out;
1943 if (dev) {
1944 if (dev != grt->dst.dev) {
1945 ip6_rt_put(grt);
1946 goto out;
1948 } else {
/* inherit device/idev from the route to the gateway */
1949 dev = grt->dst.dev;
1950 idev = grt->rt6i_idev;
1951 dev_hold(dev);
1952 in6_dev_hold(grt->rt6i_idev);
1954 if (!(grt->rt6i_flags & RTF_GATEWAY))
1955 err = 0;
1956 ip6_rt_put(grt);
1958 if (err)
1959 goto out;
1961 err = -EINVAL;
1962 if (!dev || (dev->flags & IFF_LOOPBACK))
1963 goto out;
1966 err = -ENODEV;
1967 if (!dev)
1968 goto out;
1970 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
/* preferred source must be a local address on this device */
1971 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1972 err = -EINVAL;
1973 goto out;
1975 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1976 rt->rt6i_prefsrc.plen = 128;
1977 } else
1978 rt->rt6i_prefsrc.plen = 0;
1980 rt->rt6i_flags = cfg->fc_flags;
1982 install_route:
1983 rt->dst.dev = dev;
1984 rt->rt6i_idev = idev;
1985 rt->rt6i_table = table;
1987 cfg->fc_nlinfo.nl_net = dev_net(dev);
1989 return rt;
1990 out:
1991 if (dev)
1992 dev_put(dev);
1993 if (idev)
1994 in6_dev_put(idev);
1995 if (rt)
1996 dst_free(&rt->dst);
1998 return ERR_PTR(err);
/* Create a route from @cfg, convert its netlink metrics, and insert it
 * into the fib.  __ip6_ins_rt() consumes the route on both success and
 * failure; the local error path frees a route that was never inserted.
 * Returns 0 or a negative errno.
 */
2001 int ip6_route_add(struct fib6_config *cfg)
2003 struct mx6_config mxc = { .mx = NULL, };
2004 struct rt6_info *rt;
2005 int err;
2007 rt = ip6_route_info_create(cfg);
2008 if (IS_ERR(rt)) {
2009 err = PTR_ERR(rt);
2010 rt = NULL;
2011 goto out;
2014 err = ip6_convert_metrics(&mxc, cfg);
2015 if (err)
2016 goto out;
2018 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
/* metrics array was copied by the insert; free our working copy */
2020 kfree(mxc.mx);
2022 return err;
2023 out:
2024 if (rt)
2025 dst_free(&rt->dst);
2027 return err;
/* Remove @rt from its fib6 table under the table write lock.  The null
 * entry and uncached (DST_NOCACHE) routes are not in the tree and yield
 * -ENOENT.  Consumes the caller's reference on @rt in all cases.
 */
2030 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2032 int err;
2033 struct fib6_table *table;
2034 struct net *net = dev_net(rt->dst.dev);
2036 if (rt == net->ipv6.ip6_null_entry ||
2037 rt->dst.flags & DST_NOCACHE) {
2038 err = -ENOENT;
2039 goto out;
2042 table = rt->rt6i_table;
2043 write_lock_bh(&table->tb6_lock);
2044 err = fib6_del(rt, info);
2045 write_unlock_bh(&table->tb6_lock);
2047 out:
2048 ip6_rt_put(rt);
2049 return err;
/* Public deletion entry point: wraps __ip6_del_rt() with a default
 * nl_info carrying only the route's netns.  Consumes the reference.
 */
2052 int ip6_del_rt(struct rt6_info *rt)
2054 struct nl_info info = {
2055 .nl_net = dev_net(rt->dst.dev),
2057 return __ip6_del_rt(rt, &info);
/* Delete the route matching @cfg: locate the fib node for dst/src
 * prefixes, then scan its leaf chain for an entry whose ifindex, gateway,
 * metric and protocol all match the (optional) selectors in @cfg.
 * RTF_CACHE clones are skipped unless explicitly requested.  Returns 0
 * on deletion, -ESRCH when nothing matched.
 */
2060 static int ip6_route_del(struct fib6_config *cfg)
2062 struct fib6_table *table;
2063 struct fib6_node *fn;
2064 struct rt6_info *rt;
2065 int err = -ESRCH;
2067 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2068 if (!table)
2069 return err;
2071 read_lock_bh(&table->tb6_lock);
2073 fn = fib6_locate(&table->tb6_root,
2074 &cfg->fc_dst, cfg->fc_dst_len,
2075 &cfg->fc_src, cfg->fc_src_len);
2077 if (fn) {
2078 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2079 if ((rt->rt6i_flags & RTF_CACHE) &&
2080 !(cfg->fc_flags & RTF_CACHE))
2081 continue;
2082 if (cfg->fc_ifindex &&
2083 (!rt->dst.dev ||
2084 rt->dst.dev->ifindex != cfg->fc_ifindex))
2085 continue;
2086 if (cfg->fc_flags & RTF_GATEWAY &&
2087 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2088 continue;
2089 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2090 continue;
2091 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2092 continue;
/* hold a ref across the lock drop; __ip6_del_rt consumes it */
2093 dst_hold(&rt->dst);
2094 read_unlock_bh(&table->tb6_lock);
2096 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2099 read_unlock_bh(&table->tb6_lock);
2101 return err;
/* Validate and apply an ICMPv6 Redirect carried in @skb against the
 * route @dst currently in use.  Performs the RFC 4861 sanity checks
 * (length, non-multicast destination, link-local target unless on-link,
 * accept_redirects policy, ND option parsing), updates the neighbour
 * cache from the target lladdr option, installs an RTF_CACHE route via
 * the new nexthop, fires the NETEVENT_REDIRECT notifier, and removes a
 * superseded cache entry.
 */
2104 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2106 struct netevent_redirect netevent;
2107 struct rt6_info *rt, *nrt = NULL;
2108 struct ndisc_options ndopts;
2109 struct inet6_dev *in6_dev;
2110 struct neighbour *neigh;
2111 struct rd_msg *msg;
2112 int optlen, on_link;
2113 u8 *lladdr;
2115 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2116 optlen -= sizeof(*msg);
2118 if (optlen < 0) {
2119 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2120 return;
2123 msg = (struct rd_msg *)icmp6_hdr(skb);
2125 if (ipv6_addr_is_multicast(&msg->dest)) {
2126 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2127 return;
2130 on_link = 0;
/* target == dest means the destination is directly on-link */
2131 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2132 on_link = 1;
2133 } else if (ipv6_addr_type(&msg->target) !=
2134 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2135 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2136 return;
2139 in6_dev = __in6_dev_get(skb->dev);
2140 if (!in6_dev)
2141 return;
/* routers ignore redirects; hosts honour the accept_redirects knob */
2142 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2143 return;
2145 /* RFC2461 8.1:
2146 * The IP source address of the Redirect MUST be the same as the current
2147 * first-hop router for the specified ICMP Destination Address.
2150 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
2151 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2152 return;
2155 lladdr = NULL;
2156 if (ndopts.nd_opts_tgt_lladdr) {
2157 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2158 skb->dev);
2159 if (!lladdr) {
2160 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2161 return;
2165 rt = (struct rt6_info *) dst;
2166 if (rt->rt6i_flags & RTF_REJECT) {
2167 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2168 return;
2171 /* Redirect received -> path was valid.
2172 * Look, redirects are sent only in response to data packets,
2173 * so that this nexthop apparently is reachable. --ANK
2175 dst_confirm(&rt->dst);
2177 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2178 if (!neigh)
2179 return;
2182 * We have finally decided to accept it.
2185 neigh_update(neigh, lladdr, NUD_STALE,
2186 NEIGH_UPDATE_F_WEAK_OVERRIDE|
2187 NEIGH_UPDATE_F_OVERRIDE|
2188 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2189 NEIGH_UPDATE_F_ISROUTER))
2192 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
2193 if (!nrt)
2194 goto out;
2196 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
2197 if (on_link)
2198 nrt->rt6i_flags &= ~RTF_GATEWAY;
2200 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
2202 if (ip6_ins_rt(nrt))
2203 goto out;
2205 netevent.old = &rt->dst;
2206 netevent.new = &nrt->dst;
2207 netevent.daddr = &msg->dest;
2208 netevent.neigh = neigh;
2209 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
/* drop the cache entry this redirect supersedes */
2211 if (rt->rt6i_flags & RTF_CACHE) {
2212 rt = (struct rt6_info *) dst_clone(&rt->dst);
2213 ip6_del_rt(rt);
2216 out:
2217 neigh_release(neigh);
2221 * Misc support functions
/* Link clone @rt to its parent @from: clear per-clone expiry, take a
 * reference on the parent dst, and share the parent's metrics read-only.
 * The parent itself must not be a clone (BUG otherwise).
 */
2224 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2226 BUG_ON(from->dst.from);
2228 rt->rt6i_flags &= ~RTF_EXPIRES;
2229 dst_hold(&from->dst);
2230 rt->dst.from = &from->dst;
2231 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
/* Initialize clone @rt from original @ort: copy handlers, addresses,
 * flags and table, take idev and lwtunnel-state references, and attach
 * the clone to its parent via rt6_set_from().
 */
2234 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2236 rt->dst.input = ort->dst.input;
2237 rt->dst.output = ort->dst.output;
2238 rt->rt6i_dst = ort->rt6i_dst;
2239 rt->dst.error = ort->dst.error;
2240 rt->rt6i_idev = ort->rt6i_idev;
2241 if (rt->rt6i_idev)
2242 in6_dev_hold(rt->rt6i_idev);
2243 rt->dst.lastuse = jiffies;
2244 rt->rt6i_gateway = ort->rt6i_gateway;
2245 rt->rt6i_flags = ort->rt6i_flags;
2246 rt6_set_from(rt, ort);
2247 rt->rt6i_metric = ort->rt6i_metric;
2248 #ifdef CONFIG_IPV6_SUBTREES
2249 rt->rt6i_src = ort->rt6i_src;
2250 #endif
2251 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2252 rt->rt6i_table = ort->rt6i_table;
2253 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
#ifdef CONFIG_IPV6_ROUTE_INFO
/* Find an existing RA "Route Information" route for prefix/prefixlen via
 * @gwaddr on @ifindex in RT6_TABLE_INFO.  Returns the route with a held
 * reference, or NULL.
 */
2257 static struct rt6_info *rt6_get_route_info(struct net *net,
2258 const struct in6_addr *prefix, int prefixlen,
2259 const struct in6_addr *gwaddr, int ifindex)
2261 struct fib6_node *fn;
2262 struct rt6_info *rt = NULL;
2263 struct fib6_table *table;
2265 table = fib6_get_table(net, RT6_TABLE_INFO);
2266 if (!table)
2267 return NULL;
2269 read_lock_bh(&table->tb6_lock);
2270 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2271 if (!fn)
2272 goto out;
2274 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2275 if (rt->dst.dev->ifindex != ifindex)
2276 continue;
/* must be a gatewayed route that came from an RA route-info option */
2277 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2278 continue;
2279 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2280 continue;
2281 dst_hold(&rt->dst);
2282 break;
2284 out:
2285 read_unlock_bh(&table->tb6_lock);
2286 return rt;
/* Install a route learned from an RA "Route Information" option and
 * return it (looked up again via rt6_get_route_info, with a held
 * reference).  A zero prefix length is treated as a default route.
 */
2289 static struct rt6_info *rt6_add_route_info(struct net *net,
2290 const struct in6_addr *prefix, int prefixlen,
2291 const struct in6_addr *gwaddr, int ifindex,
2292 unsigned int pref)
2294 struct fib6_config cfg = {
2295 .fc_metric = IP6_RT_PRIO_USER,
2296 .fc_ifindex = ifindex,
2297 .fc_dst_len = prefixlen,
2298 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2299 RTF_UP | RTF_PREF(pref),
2300 .fc_nlinfo.portid = 0,
2301 .fc_nlinfo.nlh = NULL,
2302 .fc_nlinfo.nl_net = net,
/* VRF-aware: prefer the l3mdev table when the device is enslaved */
2305 cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
2306 cfg.fc_dst = *prefix;
2307 cfg.fc_gateway = *gwaddr;
2309 /* We should treat it as a default route if prefix length is 0. */
2310 if (!prefixlen)
2311 cfg.fc_flags |= RTF_DEFAULT;
2313 ip6_route_add(&cfg);
2315 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
2317 #endif
/* Find the RA-learned default route via gateway @addr on @dev in
 * RT6_TABLE_DFLT.  Returns the route with a held reference, or NULL.
 */
2319 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2321 struct rt6_info *rt;
2322 struct fib6_table *table;
2324 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
2325 if (!table)
2326 return NULL;
2328 read_lock_bh(&table->tb6_lock);
2329 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2330 if (dev == rt->dst.dev &&
2331 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2332 ipv6_addr_equal(&rt->rt6i_gateway, addr))
2333 break;
2335 if (rt)
2336 dst_hold(&rt->dst);
2337 read_unlock_bh(&table->tb6_lock);
2338 return rt;
/* Install an RA-learned default route via @gwaddr on @dev with router
 * preference @pref, then look it up again and return it with a held
 * reference (NULL if the add failed).
 */
2341 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2342 struct net_device *dev,
2343 unsigned int pref)
2345 struct fib6_config cfg = {
2346 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
2347 .fc_metric = IP6_RT_PRIO_USER,
2348 .fc_ifindex = dev->ifindex,
2349 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2350 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2351 .fc_nlinfo.portid = 0,
2352 .fc_nlinfo.nlh = NULL,
2353 .fc_nlinfo.nl_net = dev_net(dev),
2356 cfg.fc_gateway = *gwaddr;
2358 ip6_route_add(&cfg);
2360 return rt6_get_dflt_router(gwaddr, dev);
/* Delete all RA-learned default routes in @net, except on interfaces
 * configured with accept_ra == 2 (always accept).  Restarts the table
 * walk after each deletion because the read lock is dropped.
 */
2363 void rt6_purge_dflt_routers(struct net *net)
2365 struct rt6_info *rt;
2366 struct fib6_table *table;
2368 /* NOTE: Keep consistent with rt6_get_dflt_router */
2369 table = fib6_get_table(net, RT6_TABLE_DFLT);
2370 if (!table)
2371 return;
2373 restart:
2374 read_lock_bh(&table->tb6_lock);
2375 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2376 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2377 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
/* hold for ip6_del_rt(), which consumes the ref */
2378 dst_hold(&rt->dst);
2379 read_unlock_bh(&table->tb6_lock);
2380 ip6_del_rt(rt);
2381 goto restart;
2384 read_unlock_bh(&table->tb6_lock);
/* Translate a legacy ioctl in6_rtmsg into a fib6_config, choosing the
 * l3mdev table for enslaved interfaces and RT6_TABLE_MAIN otherwise.
 */
2387 static void rtmsg_to_fib6_config(struct net *net,
2388 struct in6_rtmsg *rtmsg,
2389 struct fib6_config *cfg)
2391 memset(cfg, 0, sizeof(*cfg));
2393 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2394 : RT6_TABLE_MAIN;
2395 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2396 cfg->fc_metric = rtmsg->rtmsg_metric;
2397 cfg->fc_expires = rtmsg->rtmsg_info;
2398 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2399 cfg->fc_src_len = rtmsg->rtmsg_src_len;
2400 cfg->fc_flags = rtmsg->rtmsg_flags;
2402 cfg->fc_nlinfo.nl_net = net;
2404 cfg->fc_dst = rtmsg->rtmsg_dst;
2405 cfg->fc_src = rtmsg->rtmsg_src;
2406 cfg->fc_gateway = rtmsg->rtmsg_gateway;
/* Legacy SIOCADDRT/SIOCDELRT ioctl handler: requires CAP_NET_ADMIN in
 * the target netns, copies the in6_rtmsg from userspace, converts it to
 * a fib6_config, and adds or deletes the route under the rtnl lock.
 * Any other command returns -EINVAL.
 */
2409 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2411 struct fib6_config cfg;
2412 struct in6_rtmsg rtmsg;
2413 int err;
2415 switch (cmd) {
2416 case SIOCADDRT: /* Add a route */
2417 case SIOCDELRT: /* Delete a route */
2418 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2419 return -EPERM;
/* copy_from_user() returns the number of bytes NOT copied */
2420 err = copy_from_user(&rtmsg, arg,
2421 sizeof(struct in6_rtmsg));
2422 if (err)
2423 return -EFAULT;
2425 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2427 rtnl_lock();
2428 switch (cmd) {
2429 case SIOCADDRT:
2430 err = ip6_route_add(&cfg);
2431 break;
2432 case SIOCDELRT:
2433 err = ip6_route_del(&cfg);
2434 break;
2435 default:
2436 err = -EINVAL;
2438 rtnl_unlock();
2440 return err;
2443 return -EINVAL;
2447 * Drop the packet on the floor
/* Common drop path for discard/prohibit routes: bump the appropriate
 * SNMP counter (addr-error for unspecified destinations on input,
 * otherwise the supplied no-route counter), send an ICMPv6 destination
 * unreachable with @code, and free the skb.
 */
2450 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2452 int type;
2453 struct dst_entry *dst = skb_dst(skb);
2454 switch (ipstats_mib_noroutes) {
2455 case IPSTATS_MIB_INNOROUTES:
2456 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2457 if (type == IPV6_ADDR_ANY) {
2458 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2459 IPSTATS_MIB_INADDRERRORS);
2460 break;
2462 /* FALLTHROUGH */
2463 case IPSTATS_MIB_OUTNOROUTES:
2464 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2465 ipstats_mib_noroutes);
2466 break;
2468 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2469 kfree_skb(skb);
2470 return 0;
/* dst.input handler for unreachable/throw routes on the input path. */
2473 static int ip6_pkt_discard(struct sk_buff *skb)
2475 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
/* dst.output handler for unreachable/throw routes on the output path. */
2478 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2480 skb->dev = skb_dst(skb)->dev;
2481 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
/* dst.input handler for RTN_PROHIBIT routes on the input path. */
2484 static int ip6_pkt_prohibit(struct sk_buff *skb)
2486 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
/* dst.output handler for RTN_PROHIBIT routes on the output path. */
2489 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2491 skb->dev = skb_dst(skb)->dev;
2492 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2496 * Allocate a dst for local (unicast / anycast) address.
/* Build the host route for a local or anycast address @addr on @idev's
 * interface.  The route lives on the loopback device, is uncounted and
 * uncached (DST_NOCACHE), and is returned with refcount 1.
 */
2499 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2500 const struct in6_addr *addr,
2501 bool anycast)
2503 u32 tb_id;
2504 struct net *net = dev_net(idev->dev);
2505 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2506 DST_NOCOUNT);
2507 if (!rt)
2508 return ERR_PTR(-ENOMEM);
2510 in6_dev_hold(idev);
2512 rt->dst.flags |= DST_HOST;
2513 rt->dst.input = ip6_input;
2514 rt->dst.output = ip6_output;
2515 rt->rt6i_idev = idev;
2517 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2518 if (anycast)
2519 rt->rt6i_flags |= RTF_ANYCAST;
2520 else
2521 rt->rt6i_flags |= RTF_LOCAL;
2523 rt->rt6i_gateway = *addr;
2524 rt->rt6i_dst.addr = *addr;
2525 rt->rt6i_dst.plen = 128;
/* enslaved devices use their l3mdev table instead of "local" */
2526 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
2527 rt->rt6i_table = fib6_get_table(net, tb_id);
2528 rt->dst.flags |= DST_NOCACHE;
2530 atomic_set(&rt->dst.__refcnt, 1);
2532 return rt;
2535 int ip6_route_get_saddr(struct net *net,
2536 struct rt6_info *rt,
2537 const struct in6_addr *daddr,
2538 unsigned int prefs,
2539 struct in6_addr *saddr)
2541 struct inet6_dev *idev =
2542 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
2543 int err = 0;
2544 if (rt && rt->rt6i_prefsrc.plen)
2545 *saddr = rt->rt6i_prefsrc.addr;
2546 else
2547 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2548 daddr, prefs, saddr);
2549 return err;
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* restrict match to this device; NULL = any */
	struct net *net;	/* namespace being walked */
	struct in6_addr *addr;	/* the address going away */
};
2559 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2561 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2562 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2563 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2565 if (((void *)rt->dst.dev == dev || !dev) &&
2566 rt != net->ipv6.ip6_null_entry &&
2567 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2568 /* remove prefsrc entry */
2569 rt->rt6i_prefsrc.plen = 0;
2571 return 0;
/* Called when address @ifp is deleted: walk the whole FIB and clear any
 * preferred-source entry that still refers to it.
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}
/* Flag combinations identifying default routes learned from Router
 * Advertisements and cached routes that go through a gateway.
 */
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2588 /* Remove routers and update dst entries when gateway turn into host. */
2589 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2591 struct in6_addr *gateway = (struct in6_addr *)arg;
2593 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2594 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2595 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2596 return -1;
2598 return 0;
/* Flush routes (RA-learned and cached) whose next hop is @gateway;
 * used when that node stops acting as a router.
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}
/* Argument block for fib6_ifdown(): which device (NULL = all devices)
 * in which namespace is going down.
 */
struct arg_dev_net {
	struct net_device *dev;
	struct net *net;
};
/* fib6_clean_all()/icmp6_clean_all() callback: return -1 (delete) for
 * every route on the device being brought down — or on any device when
 * adn->dev is NULL — except the namespace's null entry, which must
 * survive.
 */
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
	const struct arg_dev_net *adn = arg;
	const struct net_device *dev = adn->dev;

	if ((rt->dst.dev == dev || !dev) &&
	    rt != adn->net->ipv6.ip6_null_entry)
		return -1;

	return 0;
}
/* Flush all routes through @dev (or every device when @dev is NULL)
 * from the FIB, the ICMP rate-limit dsts, and — for a specific device —
 * the per-cpu uncached route list.
 */
void rt6_ifdown(struct net *net, struct net_device *dev)
{
	struct arg_dev_net adn = {
		.dev = dev,
		.net = net,
	};

	fib6_clean_all(net, fib6_ifdown, &adn);
	icmp6_clean_all(fib6_ifdown, &adn);
	if (dev)
		rt6_uncached_list_flush_dev(net, dev);
}
/* Argument block for rt6_mtu_change_route(): the device whose MTU
 * changed and its new value.
 */
struct rt6_mtu_change_arg {
	struct net_device *dev;
	unsigned int mtu;
};
/* fib6_clean_all() callback invoked on an administrative device MTU
 * change: propagate the new MTU into routes/PMTU caches on that device.
 * Always returns 0 (routes are edited, never deleted).
 */
static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For administrative MTU increase, there is no way to discover
	   IPv6 PMTU increase, so PMTU increase should be updated here.
	   Since RFC 1981 doesn't include administrative MTU increase
	   update PMTU increase is a MUST. (i.e. jumbo frame)

	   If new MTU is less than route PMTU, this new MTU will be the
	   lowest MTU in the path, update the route PMTU to reflect PMTU
	   decreases; if new MTU is greater than route PMTU, and the
	   old MTU is the lowest MTU in the path, update the route PMTU
	   to reflect the increase. In this case if the other nodes' MTU
	   also have the lowest MTU, TOO BIG MESSAGE will be lead to
	   PMTU discouvery.
	 */
	if (rt->dst.dev == arg->dev &&
	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
		if (rt->rt6i_flags & RTF_CACHE) {
			/* For RTF_CACHE with rt6i_pmtu == 0
			 * (i.e. a redirected route),
			 * the metrics of its rt->dst.from has already
			 * been updated.
			 */
			if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
				rt->rt6i_pmtu = arg->mtu;
		} else if (dst_mtu(&rt->dst) >= arg->mtu ||
			   (dst_mtu(&rt->dst) < arg->mtu &&
			    dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
		}
	}
	return 0;
}
/* Entry point for device MTU changes: walk @dev's namespace FIB and
 * update per-route MTU state via rt6_mtu_change_route().
 */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}
/* Netlink attribute validation policy for RTM_{NEW,DEL,GET}ROUTE
 * requests; enforced by nlmsg_parse() before attributes are consumed.
 */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]               = { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
	[RTA_ENCAP]             = { .type = NLA_NESTED },
};
/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink request into a
 * fib6_config.  Note cfg->fc_mx and cfg->fc_mp point into the request's
 * attribute data, so the skb must outlive any use of @cfg.  Returns 0
 * on success, the nlmsg_parse() error, or -EINVAL for truncated
 * address attributes.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;	/* may be overridden by RTA_TABLE */
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	/* all of these route types are backed by reject dsts */
	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}

	if (tb[RTA_DST]) {
		/* only prefix-length bytes need to be present */
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
	}

	if (tb[RTA_PREF]) {
		/* unknown preference values degrade to "medium" */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE])
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

	err = 0;
errout:
	return err;
}
/* One pending nexthop of a multipath request, queued on rt6_nh_list
 * until all entries have parsed cleanly.
 */
struct rt6_nh {
	struct rt6_info *rt6_info;	/* route to insert; NULL once consumed */
	struct fib6_config r_cfg;	/* per-nexthop config (for rollback) */
	struct mx6_config mxc;		/* converted metrics, owned here */
	struct list_head next;
};
/* Warn about a partially applied multipath replace: some old nexthops
 * were already replaced when an insertion failed, so the installed set
 * may no longer match what userspace requested.
 */
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
			nh->r_cfg.fc_ifindex);
	}
}
/* Queue @rt on @rt6_nh_list for later insertion, converting the metrics
 * in @r_cfg.  Returns -EEXIST when an equivalent nexthop (same device,
 * same idev, same gateway) is already queued, -ENOMEM or the metric
 * conversion error on failure, 0 on success.  On success the list entry
 * owns @rt and the converted metrics.
 */
static int ip6_route_info_append(struct list_head *rt6_nh_list,
				 struct rt6_info *rt, struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	struct rt6_info *rtnh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if rt6_info already exists */
		rtnh = nh->rt6_info;

		if (rtnh->dst.dev == rt->dst.dev &&
		    rtnh->rt6i_idev == rt->rt6i_idev &&
		    ipv6_addr_equal(&rtnh->rt6i_gateway,
				    &rt->rt6i_gateway))
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->rt6_info = rt;
	err = ip6_convert_metrics(&nh->mxc, r_cfg);
	if (err) {
		kfree(nh);
		return err;
	}
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}
2865 static int ip6_route_multipath_add(struct fib6_config *cfg)
2867 struct fib6_config r_cfg;
2868 struct rtnexthop *rtnh;
2869 struct rt6_info *rt;
2870 struct rt6_nh *err_nh;
2871 struct rt6_nh *nh, *nh_safe;
2872 int remaining;
2873 int attrlen;
2874 int err = 1;
2875 int nhn = 0;
2876 int replace = (cfg->fc_nlinfo.nlh &&
2877 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
2878 LIST_HEAD(rt6_nh_list);
2880 remaining = cfg->fc_mp_len;
2881 rtnh = (struct rtnexthop *)cfg->fc_mp;
2883 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
2884 * rt6_info structs per nexthop
2886 while (rtnh_ok(rtnh, remaining)) {
2887 memcpy(&r_cfg, cfg, sizeof(*cfg));
2888 if (rtnh->rtnh_ifindex)
2889 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2891 attrlen = rtnh_attrlen(rtnh);
2892 if (attrlen > 0) {
2893 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2895 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2896 if (nla) {
2897 r_cfg.fc_gateway = nla_get_in6_addr(nla);
2898 r_cfg.fc_flags |= RTF_GATEWAY;
2900 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
2901 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
2902 if (nla)
2903 r_cfg.fc_encap_type = nla_get_u16(nla);
2906 rt = ip6_route_info_create(&r_cfg);
2907 if (IS_ERR(rt)) {
2908 err = PTR_ERR(rt);
2909 rt = NULL;
2910 goto cleanup;
2913 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
2914 if (err) {
2915 dst_free(&rt->dst);
2916 goto cleanup;
2919 rtnh = rtnh_next(rtnh, &remaining);
2922 err_nh = NULL;
2923 list_for_each_entry(nh, &rt6_nh_list, next) {
2924 err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
2925 /* nh->rt6_info is used or freed at this point, reset to NULL*/
2926 nh->rt6_info = NULL;
2927 if (err) {
2928 if (replace && nhn)
2929 ip6_print_replace_route_err(&rt6_nh_list);
2930 err_nh = nh;
2931 goto add_errout;
2934 /* Because each route is added like a single route we remove
2935 * these flags after the first nexthop: if there is a collision,
2936 * we have already failed to add the first nexthop:
2937 * fib6_add_rt2node() has rejected it; when replacing, old
2938 * nexthops have been replaced by first new, the rest should
2939 * be added to it.
2941 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
2942 NLM_F_REPLACE);
2943 nhn++;
2946 goto cleanup;
2948 add_errout:
2949 /* Delete routes that were already added */
2950 list_for_each_entry(nh, &rt6_nh_list, next) {
2951 if (err_nh == nh)
2952 break;
2953 ip6_route_del(&nh->r_cfg);
2956 cleanup:
2957 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
2958 if (nh->rt6_info)
2959 dst_free(&nh->rt6_info->dst);
2960 kfree(nh->mxc.mx);
2961 list_del(&nh->next);
2962 kfree(nh);
2965 return err;
/* Delete every nexthop of an RTA_MULTIPATH request as an individual
 * route.  Unlike the add path this is best-effort: all entries are
 * attempted and the last failing error (0 if none failed) is returned.
 */
static int ip6_route_multipath_del(struct fib6_config *cfg)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				/* 16 = sizeof(struct in6_addr) */
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
		err = ip6_route_del(&r_cfg);
		if (err)
			last_err = err;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}
3005 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
3007 struct fib6_config cfg;
3008 int err;
3010 err = rtm_to_fib6_config(skb, nlh, &cfg);
3011 if (err < 0)
3012 return err;
3014 if (cfg.fc_mp)
3015 return ip6_route_multipath_del(&cfg);
3016 else
3017 return ip6_route_del(&cfg);
3020 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
3022 struct fib6_config cfg;
3023 int err;
3025 err = rtm_to_fib6_config(skb, nlh, &cfg);
3026 if (err < 0)
3027 return err;
3029 if (cfg.fc_mp)
3030 return ip6_route_multipath_add(&cfg);
3031 else
3032 return ip6_route_add(&cfg);
/* Upper bound on the netlink message size rt6_fill_node() may emit for
 * @rt; used to size the notification skb in inet6_rt_notify().
 */
static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
{
	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->dst.lwtstate);
}
/* Fill one route message describing @rt into @skb.
 *
 * @dst/@src: when non-NULL (getroute path), report these as /128
 *	destination/source instead of the route's own prefix.
 * @iif: input interface for getroute-by-iif requests; 0 otherwise.
 * @prefix: when set, skip (return 1 for) routes without RTF_PREFIX_RT.
 * @nowait: forwarded to ip6mr_get_route() for unresolved mcast routes.
 *
 * Returns 0 on success, 1 when the route was filtered out, -EMSGSIZE
 * when @skb has no room (the partial message is cancelled).
 */
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 int prefix, int nowait, unsigned int flags)
{
	u32 metrics[RTAX_MAX];
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	long expires;
	u32 table;

	if (prefix) {	/* user wants prefix routes only */
		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */
			return 1;
		}
	}

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt->rt6i_dst.plen;
	rtm->rtm_src_len = rt->rt6i_src.plen;
	rtm->rtm_tos = 0;
	if (rt->rt6i_table)
		table = rt->rt6i_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;
	if (rt->rt6i_flags & RTF_REJECT) {
		/* map the reject dst's error back to the route type that
		 * userspace configured (see rtm_to_fib6_config())
		 */
		switch (rt->dst.error) {
		case -EINVAL:
			rtm->rtm_type = RTN_BLACKHOLE;
			break;
		case -EACCES:
			rtm->rtm_type = RTN_PROHIBIT;
			break;
		case -EAGAIN:
			rtm->rtm_type = RTN_THROW;
			break;
		default:
			rtm->rtm_type = RTN_UNREACHABLE;
			break;
		}
	}
	else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
	else
		rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;
	if (!netif_carrier_ok(rt->dst.dev)) {
		rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
			rtm->rtm_flags |= RTNH_F_DEAD;
	}
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
	if (rt->rt6i_flags & RTF_DYNAMIC)
		rtm->rtm_protocol = RTPROT_REDIRECT;
	else if (rt->rt6i_flags & RTF_ADDRCONF) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
			rtm->rtm_protocol = RTPROT_RA;
		else
			rtm->rtm_protocol = RTPROT_KERNEL;
	}

	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dst) {
		if (nla_put_in6_addr(skb, RTA_DST, dst))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			int err = ip6mr_get_route(net, skb, rtm, nowait,
						  portid);

			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
				}
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dst) {
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->rt6i_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->rt6i_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt6i_pmtu)
		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;	/* cached PMTU wins */
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (rt->rt6i_flags & RTF_GATEWAY) {
		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
			goto nla_put_failure;
	}

	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
		goto nla_put_failure;

	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
		goto nla_put_failure;

	if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
3216 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
3218 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
3219 int prefix;
3221 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
3222 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
3223 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
3224 } else
3225 prefix = 0;
3227 return rt6_fill_node(arg->net,
3228 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
3229 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
3230 prefix, 0, NLM_F_MULTI);
/* RTM_GETROUTE handler: perform a route lookup for the requested flow
 * and unicast the matching route back to the requester.  With RTA_IIF
 * the input path lookup is used, otherwise the output path.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	int err, iif = 0, oif = 0;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (iif) {
		/* simulate reception on the given interface */
		struct net_device *dev;
		int flags = 0;

		dev = __dev_get_by_index(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
							       flags);
	} else {
		fl6.flowi6_oif = oif;

		/* l3mdev (VRF) master device: route in its table but
		 * don't pin the nexthop to the master itself
		 */
		if (netif_index_is_l3_master(net, oif)) {
			fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC |
					   FLOWI_FLAG_SKIP_NH_OIF;
		}

		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));

	/* skb now owns the rt reference */
	skb_dst_set(skb, &rt->dst);

	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
			    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
			    nlh->nlmsg_seq, 0, 0, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
/* Broadcast an RTM_NEWROUTE/RTM_DELROUTE notification for @rt to the
 * RTNLGRP_IPV6_ROUTE multicast group; on failure the error is recorded
 * on the group so listeners see ENOBUFS.
 */
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	/* echo the triggering request's sequence number when there is one */
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
			    event, info->portid, seq, 0, 0, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
/* Netdevice notifier: when a namespace's loopback device registers,
 * point the namespace's special route entries (null / prohibit /
 * blackhole) at it and take the idev references.
 */
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	}

	return NOTIFY_OK;
}
3381 * /proc
3384 #ifdef CONFIG_PROC_FS
/* /proc/net/ipv6_route: per-netns seq_file of the routing table
 * (ipv6_route_open is defined earlier in this file).
 */
static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
/* /proc/net/rt6_stats: one line of hex FIB statistics for the
 * namespace (node/route counts, cache sizes, discards).
 */
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   net->ipv6.rt6_stats->fib_rt_alloc,
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
/* open() for /proc/net/rt6_stats: single-record, netns-aware seq file */
static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}
/* file_operations for /proc/net/rt6_stats */
static const struct file_operations rt6_stats_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt6_stats_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release_net,
};
3421 #endif /* CONFIG_PROC_FS */
3423 #ifdef CONFIG_SYSCTL
3425 static
3426 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3427 void __user *buffer, size_t *lenp, loff_t *ppos)
3429 struct net *net;
3430 int delay;
3431 if (!write)
3432 return -EINVAL;
3434 net = (struct net *)ctl->extra1;
3435 delay = net->ipv6.sysctl.flush_delay;
3436 proc_dointvec(ctl, write, buffer, lenp, ppos);
3437 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
3438 return 0;
/* Template for the per-netns net.ipv6.route.* sysctl table; cloned and
 * re-pointed at namespace fields by ipv6_route_sysctl_init(), whose
 * index assignments must match this layout.
 */
struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	=	"flush",
		.data		=	&init_net.ipv6.sysctl.flush_delay,
		.maxlen		=	sizeof(int),
		.mode		=	0200,	/* write-only trigger */
		.proc_handler	=	ipv6_sysctl_rtcache_flush
	},
	{
		.procname	=	"gc_thresh",
		.data		=	&ip6_dst_ops_template.gc_thresh,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"max_size",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_timeout",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_elasticity",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"mtu_expires",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"min_adv_mss",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval_ms",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_ms_jiffies,
	},
	{ }	/* sentinel */
};
/* Build the per-netns copy of ipv6_route_table_template, pointing each
 * entry's .data at the namespace's own fields (indices must match the
 * template's order).  The caller registers and later frees the table.
 * Returns NULL on allocation failure.
 */
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;	/* flush handler needs the netns */
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
/* Per-netns route state setup: dst ops and accounting, the special
 * null / prohibit / blackhole route entries, and sysctl defaults.
 * Unwinds in reverse order on failure; returns 0 or -ENOMEM.
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	/* each template copy must point back at itself / its netns ops */
	net->ipv6.ip6_null_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	/* sysctl defaults; tunable later via net.ipv6.route.* */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
/* Per-netns teardown mirroring ip6_route_net_init(): free the special
 * route entries and the dst accounting.
 */
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
/* Late per-netns init: register the /proc/net entries once the rest of
 * the route state exists.  proc_create() failures are ignored here —
 * the proc files are best-effort, not required for routing.
 */
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
#endif
	return 0;
}
/* Late per-netns teardown: remove the /proc/net entries. */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
/* Core per-netns route state lifecycle */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
3649 static int __net_init ipv6_inetpeer_init(struct net *net)
3651 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3653 if (!bp)
3654 return -ENOMEM;
3655 inet_peer_base_init(bp);
3656 net->ipv6.peers = bp;
3657 return 0;
/* Tear down the per-netns inetpeer base: detach it first so no new
 * lookups find it, then invalidate and free.
 */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}
/* Per-netns inetpeer base lifecycle */
static struct pernet_operations ipv6_inetpeer_ops = {
	.init	=	ipv6_inetpeer_init,
	.exit	=	ipv6_inetpeer_exit,
};
/* Late per-netns lifecycle (/proc entries) */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};
/* Netdevice event hook wiring loopback into the special route entries */
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = 0,
};
/* Boot-time initialisation of the IPv6 routing subsystem: slab cache,
 * pernet subsystems, FIB/xfrm/rules, rtnetlink handlers, the netdevice
 * notifier and the per-cpu uncached route lists.  On any failure, the
 * goto chain unwinds everything registered so far, in reverse order.
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* blackhole dsts share the rt6_info slab cache */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = -ENOBUFS;
	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

	/* error unwind: reverse order of the registrations above */
out_register_late_subsys:
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
/*
 * Module teardown: unwinds everything registered by ip6_route_init(),
 * in the reverse of the registration order.
 */
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}