Linux 4.1.16
[linux/fpc-iii.git] / net / ipv6 / route.c
blobf371fefa7fdcce6f05c67b9bf37353c8f07bdd1a
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 /* Changes:
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
27 #define pr_fmt(fmt) "IPv6: " fmt
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
62 #include <asm/uaccess.h>
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
68 enum rt6_nud_state {
69 RT6_NUD_FAIL_HARD = -3,
70 RT6_NUD_FAIL_PROBE = -2,
71 RT6_NUD_FAIL_DO_RR = -1,
72 RT6_NUD_SUCCEED = 1
75 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
76 const struct in6_addr *dest);
77 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
78 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
79 static unsigned int ip6_mtu(const struct dst_entry *dst);
80 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
81 static void ip6_dst_destroy(struct dst_entry *);
82 static void ip6_dst_ifdown(struct dst_entry *,
83 struct net_device *dev, int how);
84 static int ip6_dst_gc(struct dst_ops *ops);
86 static int ip6_pkt_discard(struct sk_buff *skb);
87 static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
88 static int ip6_pkt_prohibit(struct sk_buff *skb);
89 static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
90 static void ip6_link_failure(struct sk_buff *skb);
91 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
92 struct sk_buff *skb, u32 mtu);
93 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
94 struct sk_buff *skb);
95 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
97 #ifdef CONFIG_IPV6_ROUTE_INFO
98 static struct rt6_info *rt6_add_route_info(struct net *net,
99 const struct in6_addr *prefix, int prefixlen,
100 const struct in6_addr *gwaddr, int ifindex,
101 unsigned int pref);
102 static struct rt6_info *rt6_get_route_info(struct net *net,
103 const struct in6_addr *prefix, int prefixlen,
104 const struct in6_addr *gwaddr, int ifindex);
105 #endif
107 static void rt6_bind_peer(struct rt6_info *rt, int create)
109 struct inet_peer_base *base;
110 struct inet_peer *peer;
112 base = inetpeer_base_ptr(rt->_rt6i_peer);
113 if (!base)
114 return;
116 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
117 if (peer) {
118 if (!rt6_set_peer(rt, peer))
119 inet_putpeer(peer);
123 static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
125 if (rt6_has_peer(rt))
126 return rt6_peer_ptr(rt);
128 rt6_bind_peer(rt, create);
129 return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
132 static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
134 return __rt6_get_peer(rt, 1);
137 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
139 struct rt6_info *rt = (struct rt6_info *) dst;
140 struct inet_peer *peer;
141 u32 *p = NULL;
143 if (!(rt->dst.flags & DST_HOST))
144 return dst_cow_metrics_generic(dst, old);
146 peer = rt6_get_peer_create(rt);
147 if (peer) {
148 u32 *old_p = __DST_METRICS_PTR(old);
149 unsigned long prev, new;
151 p = peer->metrics;
152 if (inet_metrics_new(peer) ||
153 (old & DST_METRICS_FORCE_OVERWRITE))
154 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
156 new = (unsigned long) p;
157 prev = cmpxchg(&dst->_metrics, old, new);
159 if (prev != old) {
160 p = __DST_METRICS_PTR(prev);
161 if (prev & DST_METRICS_READ_ONLY)
162 p = NULL;
165 return p;
168 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
169 struct sk_buff *skb,
170 const void *daddr)
172 struct in6_addr *p = &rt->rt6i_gateway;
174 if (!ipv6_addr_any(p))
175 return (const void *) p;
176 else if (skb)
177 return &ipv6_hdr(skb)->daddr;
178 return daddr;
181 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
182 struct sk_buff *skb,
183 const void *daddr)
185 struct rt6_info *rt = (struct rt6_info *) dst;
186 struct neighbour *n;
188 daddr = choose_neigh_daddr(rt, skb, daddr);
189 n = __ipv6_neigh_lookup(dst->dev, daddr);
190 if (n)
191 return n;
192 return neigh_create(&nd_tbl, daddr, dst->dev);
195 static struct dst_ops ip6_dst_ops_template = {
196 .family = AF_INET6,
197 .gc = ip6_dst_gc,
198 .gc_thresh = 1024,
199 .check = ip6_dst_check,
200 .default_advmss = ip6_default_advmss,
201 .mtu = ip6_mtu,
202 .cow_metrics = ipv6_cow_metrics,
203 .destroy = ip6_dst_destroy,
204 .ifdown = ip6_dst_ifdown,
205 .negative_advice = ip6_negative_advice,
206 .link_failure = ip6_link_failure,
207 .update_pmtu = ip6_rt_update_pmtu,
208 .redirect = rt6_do_redirect,
209 .local_out = __ip6_local_out,
210 .neigh_lookup = ip6_neigh_lookup,
213 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
215 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
217 return mtu ? : dst->dev->mtu;
220 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
221 struct sk_buff *skb, u32 mtu)
225 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
226 struct sk_buff *skb)
230 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
231 unsigned long old)
233 return NULL;
236 static struct dst_ops ip6_dst_blackhole_ops = {
237 .family = AF_INET6,
238 .destroy = ip6_dst_destroy,
239 .check = ip6_dst_check,
240 .mtu = ip6_blackhole_mtu,
241 .default_advmss = ip6_default_advmss,
242 .update_pmtu = ip6_rt_blackhole_update_pmtu,
243 .redirect = ip6_rt_blackhole_redirect,
244 .cow_metrics = ip6_rt_blackhole_cow_metrics,
245 .neigh_lookup = ip6_neigh_lookup,
248 static const u32 ip6_template_metrics[RTAX_MAX] = {
249 [RTAX_HOPLIMIT - 1] = 0,
252 static const struct rt6_info ip6_null_entry_template = {
253 .dst = {
254 .__refcnt = ATOMIC_INIT(1),
255 .__use = 1,
256 .obsolete = DST_OBSOLETE_FORCE_CHK,
257 .error = -ENETUNREACH,
258 .input = ip6_pkt_discard,
259 .output = ip6_pkt_discard_out,
261 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
262 .rt6i_protocol = RTPROT_KERNEL,
263 .rt6i_metric = ~(u32) 0,
264 .rt6i_ref = ATOMIC_INIT(1),
267 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
269 static const struct rt6_info ip6_prohibit_entry_template = {
270 .dst = {
271 .__refcnt = ATOMIC_INIT(1),
272 .__use = 1,
273 .obsolete = DST_OBSOLETE_FORCE_CHK,
274 .error = -EACCES,
275 .input = ip6_pkt_prohibit,
276 .output = ip6_pkt_prohibit_out,
278 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
279 .rt6i_protocol = RTPROT_KERNEL,
280 .rt6i_metric = ~(u32) 0,
281 .rt6i_ref = ATOMIC_INIT(1),
284 static const struct rt6_info ip6_blk_hole_entry_template = {
285 .dst = {
286 .__refcnt = ATOMIC_INIT(1),
287 .__use = 1,
288 .obsolete = DST_OBSOLETE_FORCE_CHK,
289 .error = -EINVAL,
290 .input = dst_discard,
291 .output = dst_discard_sk,
293 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
294 .rt6i_protocol = RTPROT_KERNEL,
295 .rt6i_metric = ~(u32) 0,
296 .rt6i_ref = ATOMIC_INIT(1),
299 #endif
301 /* allocate dst with ip6_dst_ops */
302 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
303 struct net_device *dev,
304 int flags,
305 struct fib6_table *table)
307 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
308 0, DST_OBSOLETE_FORCE_CHK, flags);
310 if (rt) {
311 struct dst_entry *dst = &rt->dst;
313 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
314 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
315 INIT_LIST_HEAD(&rt->rt6i_siblings);
317 return rt;
320 static void ip6_dst_destroy(struct dst_entry *dst)
322 struct rt6_info *rt = (struct rt6_info *)dst;
323 struct inet6_dev *idev = rt->rt6i_idev;
324 struct dst_entry *from = dst->from;
326 if (!(rt->dst.flags & DST_HOST))
327 dst_destroy_metrics_generic(dst);
329 if (idev) {
330 rt->rt6i_idev = NULL;
331 in6_dev_put(idev);
334 dst->from = NULL;
335 dst_release(from);
337 if (rt6_has_peer(rt)) {
338 struct inet_peer *peer = rt6_peer_ptr(rt);
339 inet_putpeer(peer);
343 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
344 int how)
346 struct rt6_info *rt = (struct rt6_info *)dst;
347 struct inet6_dev *idev = rt->rt6i_idev;
348 struct net_device *loopback_dev =
349 dev_net(dev)->loopback_dev;
351 if (dev != loopback_dev) {
352 if (idev && idev->dev == dev) {
353 struct inet6_dev *loopback_idev =
354 in6_dev_get(loopback_dev);
355 if (loopback_idev) {
356 rt->rt6i_idev = loopback_idev;
357 in6_dev_put(idev);
363 static bool rt6_check_expired(const struct rt6_info *rt)
365 if (rt->rt6i_flags & RTF_EXPIRES) {
366 if (time_after(jiffies, rt->dst.expires))
367 return true;
368 } else if (rt->dst.from) {
369 return rt6_check_expired((struct rt6_info *) rt->dst.from);
371 return false;
374 /* Multipath route selection:
375 * Hash based function using packet header and flowlabel.
376 * Adapted from fib_info_hashfn()
378 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
379 const struct flowi6 *fl6)
381 unsigned int val = fl6->flowi6_proto;
383 val ^= ipv6_addr_hash(&fl6->daddr);
384 val ^= ipv6_addr_hash(&fl6->saddr);
386 /* Work only if this not encapsulated */
387 switch (fl6->flowi6_proto) {
388 case IPPROTO_UDP:
389 case IPPROTO_TCP:
390 case IPPROTO_SCTP:
391 val ^= (__force u16)fl6->fl6_sport;
392 val ^= (__force u16)fl6->fl6_dport;
393 break;
395 case IPPROTO_ICMPV6:
396 val ^= (__force u16)fl6->fl6_icmp_type;
397 val ^= (__force u16)fl6->fl6_icmp_code;
398 break;
400 /* RFC6438 recommands to use flowlabel */
401 val ^= (__force u32)fl6->flowlabel;
403 /* Perhaps, we need to tune, this function? */
404 val = val ^ (val >> 7) ^ (val >> 12);
405 return val % candidate_count;
408 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
409 struct flowi6 *fl6, int oif,
410 int strict)
412 struct rt6_info *sibling, *next_sibling;
413 int route_choosen;
415 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
416 /* Don't change the route, if route_choosen == 0
417 * (siblings does not include ourself)
419 if (route_choosen)
420 list_for_each_entry_safe(sibling, next_sibling,
421 &match->rt6i_siblings, rt6i_siblings) {
422 route_choosen--;
423 if (route_choosen == 0) {
424 if (rt6_score_route(sibling, oif, strict) < 0)
425 break;
426 match = sibling;
427 break;
430 return match;
434 * Route lookup. Any table->tb6_lock is implied.
437 static inline struct rt6_info *rt6_device_match(struct net *net,
438 struct rt6_info *rt,
439 const struct in6_addr *saddr,
440 int oif,
441 int flags)
443 struct rt6_info *local = NULL;
444 struct rt6_info *sprt;
446 if (!oif && ipv6_addr_any(saddr))
447 goto out;
449 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
450 struct net_device *dev = sprt->dst.dev;
452 if (oif) {
453 if (dev->ifindex == oif)
454 return sprt;
455 if (dev->flags & IFF_LOOPBACK) {
456 if (!sprt->rt6i_idev ||
457 sprt->rt6i_idev->dev->ifindex != oif) {
458 if (flags & RT6_LOOKUP_F_IFACE && oif)
459 continue;
460 if (local && (!oif ||
461 local->rt6i_idev->dev->ifindex == oif))
462 continue;
464 local = sprt;
466 } else {
467 if (ipv6_chk_addr(net, saddr, dev,
468 flags & RT6_LOOKUP_F_IFACE))
469 return sprt;
473 if (oif) {
474 if (local)
475 return local;
477 if (flags & RT6_LOOKUP_F_IFACE)
478 return net->ipv6.ip6_null_entry;
480 out:
481 return rt;
484 #ifdef CONFIG_IPV6_ROUTER_PREF
485 struct __rt6_probe_work {
486 struct work_struct work;
487 struct in6_addr target;
488 struct net_device *dev;
491 static void rt6_probe_deferred(struct work_struct *w)
493 struct in6_addr mcaddr;
494 struct __rt6_probe_work *work =
495 container_of(w, struct __rt6_probe_work, work);
497 addrconf_addr_solict_mult(&work->target, &mcaddr);
498 ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
499 dev_put(work->dev);
500 kfree(work);
503 static void rt6_probe(struct rt6_info *rt)
505 struct neighbour *neigh;
507 * Okay, this does not seem to be appropriate
508 * for now, however, we need to check if it
509 * is really so; aka Router Reachability Probing.
511 * Router Reachability Probe MUST be rate-limited
512 * to no more than one per minute.
514 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
515 return;
516 rcu_read_lock_bh();
517 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
518 if (neigh) {
519 write_lock(&neigh->lock);
520 if (neigh->nud_state & NUD_VALID)
521 goto out;
524 if (!neigh ||
525 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
526 struct __rt6_probe_work *work;
528 work = kmalloc(sizeof(*work), GFP_ATOMIC);
530 if (neigh && work)
531 __neigh_set_probe_once(neigh);
533 if (neigh)
534 write_unlock(&neigh->lock);
536 if (work) {
537 INIT_WORK(&work->work, rt6_probe_deferred);
538 work->target = rt->rt6i_gateway;
539 dev_hold(rt->dst.dev);
540 work->dev = rt->dst.dev;
541 schedule_work(&work->work);
543 } else {
544 out:
545 write_unlock(&neigh->lock);
547 rcu_read_unlock_bh();
549 #else
550 static inline void rt6_probe(struct rt6_info *rt)
553 #endif
556 * Default Router Selection (RFC 2461 6.3.6)
558 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
560 struct net_device *dev = rt->dst.dev;
561 if (!oif || dev->ifindex == oif)
562 return 2;
563 if ((dev->flags & IFF_LOOPBACK) &&
564 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
565 return 1;
566 return 0;
569 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
571 struct neighbour *neigh;
572 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
574 if (rt->rt6i_flags & RTF_NONEXTHOP ||
575 !(rt->rt6i_flags & RTF_GATEWAY))
576 return RT6_NUD_SUCCEED;
578 rcu_read_lock_bh();
579 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
580 if (neigh) {
581 read_lock(&neigh->lock);
582 if (neigh->nud_state & NUD_VALID)
583 ret = RT6_NUD_SUCCEED;
584 #ifdef CONFIG_IPV6_ROUTER_PREF
585 else if (!(neigh->nud_state & NUD_FAILED))
586 ret = RT6_NUD_SUCCEED;
587 else
588 ret = RT6_NUD_FAIL_PROBE;
589 #endif
590 read_unlock(&neigh->lock);
591 } else {
592 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
593 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
595 rcu_read_unlock_bh();
597 return ret;
600 static int rt6_score_route(struct rt6_info *rt, int oif,
601 int strict)
603 int m;
605 m = rt6_check_dev(rt, oif);
606 if (!m && (strict & RT6_LOOKUP_F_IFACE))
607 return RT6_NUD_FAIL_HARD;
608 #ifdef CONFIG_IPV6_ROUTER_PREF
609 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
610 #endif
611 if (strict & RT6_LOOKUP_F_REACHABLE) {
612 int n = rt6_check_neigh(rt);
613 if (n < 0)
614 return n;
616 return m;
619 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
620 int *mpri, struct rt6_info *match,
621 bool *do_rr)
623 int m;
624 bool match_do_rr = false;
626 if (rt6_check_expired(rt))
627 goto out;
629 m = rt6_score_route(rt, oif, strict);
630 if (m == RT6_NUD_FAIL_DO_RR) {
631 match_do_rr = true;
632 m = 0; /* lowest valid score */
633 } else if (m == RT6_NUD_FAIL_HARD) {
634 goto out;
637 if (strict & RT6_LOOKUP_F_REACHABLE)
638 rt6_probe(rt);
640 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
641 if (m > *mpri) {
642 *do_rr = match_do_rr;
643 *mpri = m;
644 match = rt;
646 out:
647 return match;
650 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
651 struct rt6_info *rr_head,
652 u32 metric, int oif, int strict,
653 bool *do_rr)
655 struct rt6_info *rt, *match;
656 int mpri = -1;
658 match = NULL;
659 for (rt = rr_head; rt && rt->rt6i_metric == metric;
660 rt = rt->dst.rt6_next)
661 match = find_match(rt, oif, strict, &mpri, match, do_rr);
662 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
663 rt = rt->dst.rt6_next)
664 match = find_match(rt, oif, strict, &mpri, match, do_rr);
666 return match;
669 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
671 struct rt6_info *match, *rt0;
672 struct net *net;
673 bool do_rr = false;
675 rt0 = fn->rr_ptr;
676 if (!rt0)
677 fn->rr_ptr = rt0 = fn->leaf;
679 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
680 &do_rr);
682 if (do_rr) {
683 struct rt6_info *next = rt0->dst.rt6_next;
685 /* no entries matched; do round-robin */
686 if (!next || next->rt6i_metric != rt0->rt6i_metric)
687 next = fn->leaf;
689 if (next != rt0)
690 fn->rr_ptr = next;
693 net = dev_net(rt0->dst.dev);
694 return match ? match : net->ipv6.ip6_null_entry;
697 #ifdef CONFIG_IPV6_ROUTE_INFO
698 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
699 const struct in6_addr *gwaddr)
701 struct net *net = dev_net(dev);
702 struct route_info *rinfo = (struct route_info *) opt;
703 struct in6_addr prefix_buf, *prefix;
704 unsigned int pref;
705 unsigned long lifetime;
706 struct rt6_info *rt;
708 if (len < sizeof(struct route_info)) {
709 return -EINVAL;
712 /* Sanity check for prefix_len and length */
713 if (rinfo->length > 3) {
714 return -EINVAL;
715 } else if (rinfo->prefix_len > 128) {
716 return -EINVAL;
717 } else if (rinfo->prefix_len > 64) {
718 if (rinfo->length < 2) {
719 return -EINVAL;
721 } else if (rinfo->prefix_len > 0) {
722 if (rinfo->length < 1) {
723 return -EINVAL;
727 pref = rinfo->route_pref;
728 if (pref == ICMPV6_ROUTER_PREF_INVALID)
729 return -EINVAL;
731 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
733 if (rinfo->length == 3)
734 prefix = (struct in6_addr *)rinfo->prefix;
735 else {
736 /* this function is safe */
737 ipv6_addr_prefix(&prefix_buf,
738 (struct in6_addr *)rinfo->prefix,
739 rinfo->prefix_len);
740 prefix = &prefix_buf;
743 if (rinfo->prefix_len == 0)
744 rt = rt6_get_dflt_router(gwaddr, dev);
745 else
746 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
747 gwaddr, dev->ifindex);
749 if (rt && !lifetime) {
750 ip6_del_rt(rt);
751 rt = NULL;
754 if (!rt && lifetime)
755 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
756 pref);
757 else if (rt)
758 rt->rt6i_flags = RTF_ROUTEINFO |
759 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
761 if (rt) {
762 if (!addrconf_finite_timeout(lifetime))
763 rt6_clean_expires(rt);
764 else
765 rt6_set_expires(rt, jiffies + HZ * lifetime);
767 ip6_rt_put(rt);
769 return 0;
771 #endif
773 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
774 struct in6_addr *saddr)
776 struct fib6_node *pn;
777 while (1) {
778 if (fn->fn_flags & RTN_TL_ROOT)
779 return NULL;
780 pn = fn->parent;
781 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
782 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
783 else
784 fn = pn;
785 if (fn->fn_flags & RTN_RTINFO)
786 return fn;
790 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
791 struct fib6_table *table,
792 struct flowi6 *fl6, int flags)
794 struct fib6_node *fn;
795 struct rt6_info *rt;
797 read_lock_bh(&table->tb6_lock);
798 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
799 restart:
800 rt = fn->leaf;
801 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
802 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
803 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
804 if (rt == net->ipv6.ip6_null_entry) {
805 fn = fib6_backtrack(fn, &fl6->saddr);
806 if (fn)
807 goto restart;
809 dst_use(&rt->dst, jiffies);
810 read_unlock_bh(&table->tb6_lock);
811 return rt;
815 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
816 int flags)
818 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
820 EXPORT_SYMBOL_GPL(ip6_route_lookup);
822 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
823 const struct in6_addr *saddr, int oif, int strict)
825 struct flowi6 fl6 = {
826 .flowi6_oif = oif,
827 .daddr = *daddr,
829 struct dst_entry *dst;
830 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
832 if (saddr) {
833 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
834 flags |= RT6_LOOKUP_F_HAS_SADDR;
837 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
838 if (dst->error == 0)
839 return (struct rt6_info *) dst;
841 dst_release(dst);
843 return NULL;
845 EXPORT_SYMBOL(rt6_lookup);
847 /* ip6_ins_rt is called with FREE table->tb6_lock.
848 It takes new route entry, the addition fails by any reason the
849 route is freed. In any case, if caller does not hold it, it may
850 be destroyed.
853 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
854 struct mx6_config *mxc)
856 int err;
857 struct fib6_table *table;
859 table = rt->rt6i_table;
860 write_lock_bh(&table->tb6_lock);
861 err = fib6_add(&table->tb6_root, rt, info, mxc);
862 write_unlock_bh(&table->tb6_lock);
864 return err;
867 int ip6_ins_rt(struct rt6_info *rt)
869 struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
870 struct mx6_config mxc = { .mx = NULL, };
872 return __ip6_ins_rt(rt, &info, &mxc);
875 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
876 const struct in6_addr *daddr,
877 const struct in6_addr *saddr)
879 struct rt6_info *rt;
882 * Clone the route.
885 rt = ip6_rt_copy(ort, daddr);
887 if (rt) {
888 if (ort->rt6i_dst.plen != 128 &&
889 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
890 rt->rt6i_flags |= RTF_ANYCAST;
892 rt->rt6i_flags |= RTF_CACHE;
894 #ifdef CONFIG_IPV6_SUBTREES
895 if (rt->rt6i_src.plen && saddr) {
896 rt->rt6i_src.addr = *saddr;
897 rt->rt6i_src.plen = 128;
899 #endif
902 return rt;
905 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
906 const struct in6_addr *daddr)
908 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
910 if (rt)
911 rt->rt6i_flags |= RTF_CACHE;
912 return rt;
915 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
916 struct flowi6 *fl6, int flags)
918 struct fib6_node *fn, *saved_fn;
919 struct rt6_info *rt, *nrt;
920 int strict = 0;
921 int attempts = 3;
922 int err;
924 strict |= flags & RT6_LOOKUP_F_IFACE;
925 if (net->ipv6.devconf_all->forwarding == 0)
926 strict |= RT6_LOOKUP_F_REACHABLE;
928 redo_fib6_lookup_lock:
929 read_lock_bh(&table->tb6_lock);
931 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
932 saved_fn = fn;
934 redo_rt6_select:
935 rt = rt6_select(fn, oif, strict);
936 if (rt->rt6i_nsiblings)
937 rt = rt6_multipath_select(rt, fl6, oif, strict);
938 if (rt == net->ipv6.ip6_null_entry) {
939 fn = fib6_backtrack(fn, &fl6->saddr);
940 if (fn)
941 goto redo_rt6_select;
942 else if (strict & RT6_LOOKUP_F_REACHABLE) {
943 /* also consider unreachable route */
944 strict &= ~RT6_LOOKUP_F_REACHABLE;
945 fn = saved_fn;
946 goto redo_rt6_select;
947 } else {
948 dst_hold(&rt->dst);
949 read_unlock_bh(&table->tb6_lock);
950 goto out2;
954 dst_hold(&rt->dst);
955 read_unlock_bh(&table->tb6_lock);
957 if (rt->rt6i_flags & RTF_CACHE)
958 goto out2;
960 if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
961 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
962 else if (!(rt->dst.flags & DST_HOST))
963 nrt = rt6_alloc_clone(rt, &fl6->daddr);
964 else
965 goto out2;
967 ip6_rt_put(rt);
968 rt = nrt ? : net->ipv6.ip6_null_entry;
970 dst_hold(&rt->dst);
971 if (nrt) {
972 err = ip6_ins_rt(nrt);
973 if (!err)
974 goto out2;
977 if (--attempts <= 0)
978 goto out2;
981 * Race condition! In the gap, when table->tb6_lock was
982 * released someone could insert this route. Relookup.
984 ip6_rt_put(rt);
985 goto redo_fib6_lookup_lock;
987 out2:
988 rt->dst.lastuse = jiffies;
989 rt->dst.__use++;
991 return rt;
994 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
995 struct flowi6 *fl6, int flags)
997 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1000 static struct dst_entry *ip6_route_input_lookup(struct net *net,
1001 struct net_device *dev,
1002 struct flowi6 *fl6, int flags)
1004 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1005 flags |= RT6_LOOKUP_F_IFACE;
1007 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1010 void ip6_route_input(struct sk_buff *skb)
1012 const struct ipv6hdr *iph = ipv6_hdr(skb);
1013 struct net *net = dev_net(skb->dev);
1014 int flags = RT6_LOOKUP_F_HAS_SADDR;
1015 struct flowi6 fl6 = {
1016 .flowi6_iif = skb->dev->ifindex,
1017 .daddr = iph->daddr,
1018 .saddr = iph->saddr,
1019 .flowlabel = ip6_flowinfo(iph),
1020 .flowi6_mark = skb->mark,
1021 .flowi6_proto = iph->nexthdr,
1024 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1027 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1028 struct flowi6 *fl6, int flags)
1030 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1033 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
1034 struct flowi6 *fl6)
1036 int flags = 0;
1038 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1040 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
1041 flags |= RT6_LOOKUP_F_IFACE;
1043 if (!ipv6_addr_any(&fl6->saddr))
1044 flags |= RT6_LOOKUP_F_HAS_SADDR;
1045 else if (sk)
1046 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1048 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1050 EXPORT_SYMBOL(ip6_route_output);
1052 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1054 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1055 struct dst_entry *new = NULL;
1057 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1058 if (rt) {
1059 new = &rt->dst;
1061 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1062 rt6_init_peer(rt, net->ipv6.peers);
1064 new->__use = 1;
1065 new->input = dst_discard;
1066 new->output = dst_discard_sk;
1068 if (dst_metrics_read_only(&ort->dst))
1069 new->_metrics = ort->dst._metrics;
1070 else
1071 dst_copy_metrics(new, &ort->dst);
1072 rt->rt6i_idev = ort->rt6i_idev;
1073 if (rt->rt6i_idev)
1074 in6_dev_hold(rt->rt6i_idev);
1076 rt->rt6i_gateway = ort->rt6i_gateway;
1077 rt->rt6i_flags = ort->rt6i_flags;
1078 rt->rt6i_metric = 0;
1080 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1081 #ifdef CONFIG_IPV6_SUBTREES
1082 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1083 #endif
1085 dst_free(new);
1088 dst_release(dst_orig);
1089 return new ? new : ERR_PTR(-ENOMEM);
1093 * Destination cache support functions
1096 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1098 struct rt6_info *rt;
1100 rt = (struct rt6_info *) dst;
1102 /* All IPV6 dsts are created with ->obsolete set to the value
1103 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1104 * into this function always.
1106 if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
1107 return NULL;
1109 if (rt6_check_expired(rt))
1110 return NULL;
1112 return dst;
1115 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1117 struct rt6_info *rt = (struct rt6_info *) dst;
1119 if (rt) {
1120 if (rt->rt6i_flags & RTF_CACHE) {
1121 if (rt6_check_expired(rt)) {
1122 ip6_del_rt(rt);
1123 dst = NULL;
1125 } else {
1126 dst_release(dst);
1127 dst = NULL;
1130 return dst;
1133 static void ip6_link_failure(struct sk_buff *skb)
1135 struct rt6_info *rt;
1137 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1139 rt = (struct rt6_info *) skb_dst(skb);
1140 if (rt) {
1141 if (rt->rt6i_flags & RTF_CACHE) {
1142 dst_hold(&rt->dst);
1143 if (ip6_del_rt(rt))
1144 dst_free(&rt->dst);
1145 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1146 rt->rt6i_node->fn_sernum = -1;
1151 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1152 struct sk_buff *skb, u32 mtu)
1154 struct rt6_info *rt6 = (struct rt6_info *)dst;
1156 dst_confirm(dst);
1157 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1158 struct net *net = dev_net(dst->dev);
1160 rt6->rt6i_flags |= RTF_MODIFIED;
1161 if (mtu < IPV6_MIN_MTU)
1162 mtu = IPV6_MIN_MTU;
1164 dst_metric_set(dst, RTAX_MTU, mtu);
1165 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1169 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1170 int oif, u32 mark)
1172 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1173 struct dst_entry *dst;
1174 struct flowi6 fl6;
1176 memset(&fl6, 0, sizeof(fl6));
1177 fl6.flowi6_oif = oif;
1178 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
1179 fl6.daddr = iph->daddr;
1180 fl6.saddr = iph->saddr;
1181 fl6.flowlabel = ip6_flowinfo(iph);
1183 dst = ip6_route_output(net, NULL, &fl6);
1184 if (!dst->error)
1185 ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1186 dst_release(dst);
1188 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1190 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1192 ip6_update_pmtu(skb, sock_net(sk), mtu,
1193 sk->sk_bound_dev_if, sk->sk_mark);
1195 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1197 /* Handle redirects */
1198 struct ip6rd_flowi {
1199 struct flowi6 fl6;
1200 struct in6_addr gateway;
1203 static struct rt6_info *__ip6_route_redirect(struct net *net,
1204 struct fib6_table *table,
1205 struct flowi6 *fl6,
1206 int flags)
1208 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1209 struct rt6_info *rt;
1210 struct fib6_node *fn;
1212 /* Get the "current" route for this destination and
1213 * check if the redirect has come from approriate router.
1215 * RFC 4861 specifies that redirects should only be
1216 * accepted if they come from the nexthop to the target.
1217 * Due to the way the routes are chosen, this notion
1218 * is a bit fuzzy and one might need to check all possible
1219 * routes.
1222 read_lock_bh(&table->tb6_lock);
1223 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1224 restart:
1225 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1226 if (rt6_check_expired(rt))
1227 continue;
1228 if (rt->dst.error)
1229 break;
1230 if (!(rt->rt6i_flags & RTF_GATEWAY))
1231 continue;
1232 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1233 continue;
1234 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1235 continue;
1236 break;
1239 if (!rt)
1240 rt = net->ipv6.ip6_null_entry;
1241 else if (rt->dst.error) {
1242 rt = net->ipv6.ip6_null_entry;
1243 goto out;
1246 if (rt == net->ipv6.ip6_null_entry) {
1247 fn = fib6_backtrack(fn, &fl6->saddr);
1248 if (fn)
1249 goto restart;
1252 out:
1253 dst_hold(&rt->dst);
1255 read_unlock_bh(&table->tb6_lock);
1257 return rt;
1260 static struct dst_entry *ip6_route_redirect(struct net *net,
1261 const struct flowi6 *fl6,
1262 const struct in6_addr *gateway)
1264 int flags = RT6_LOOKUP_F_HAS_SADDR;
1265 struct ip6rd_flowi rdfl;
1267 rdfl.fl6 = *fl6;
1268 rdfl.gateway = *gateway;
1270 return fib6_rule_lookup(net, &rdfl.fl6,
1271 flags, __ip6_route_redirect);
1274 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1276 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1277 struct dst_entry *dst;
1278 struct flowi6 fl6;
1280 memset(&fl6, 0, sizeof(fl6));
1281 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1282 fl6.flowi6_oif = oif;
1283 fl6.flowi6_mark = mark;
1284 fl6.daddr = iph->daddr;
1285 fl6.saddr = iph->saddr;
1286 fl6.flowlabel = ip6_flowinfo(iph);
1288 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
1289 rt6_do_redirect(dst, NULL, skb);
1290 dst_release(dst);
1292 EXPORT_SYMBOL_GPL(ip6_redirect);
1294 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1295 u32 mark)
1297 const struct ipv6hdr *iph = ipv6_hdr(skb);
1298 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1299 struct dst_entry *dst;
1300 struct flowi6 fl6;
1302 memset(&fl6, 0, sizeof(fl6));
1303 fl6.flowi6_iif = LOOPBACK_IFINDEX;
1304 fl6.flowi6_oif = oif;
1305 fl6.flowi6_mark = mark;
1306 fl6.daddr = msg->dest;
1307 fl6.saddr = iph->daddr;
1309 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
1310 rt6_do_redirect(dst, NULL, skb);
1311 dst_release(dst);
1314 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1316 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1318 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1320 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1322 struct net_device *dev = dst->dev;
1323 unsigned int mtu = dst_mtu(dst);
1324 struct net *net = dev_net(dev);
1326 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1328 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1329 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1332 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1333 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1334 * IPV6_MAXPLEN is also valid and means: "any MSS,
1335 * rely only on pmtu discovery"
1337 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1338 mtu = IPV6_MAXPLEN;
1339 return mtu;
1342 static unsigned int ip6_mtu(const struct dst_entry *dst)
1344 struct inet6_dev *idev;
1345 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1347 if (mtu)
1348 goto out;
1350 mtu = IPV6_MIN_MTU;
1352 rcu_read_lock();
1353 idev = __in6_dev_get(dst->dev);
1354 if (idev)
1355 mtu = idev->cnf.mtu6;
1356 rcu_read_unlock();
1358 out:
1359 return min_t(unsigned int, mtu, IP6_MAX_MTU);
1362 static struct dst_entry *icmp6_dst_gc_list;
1363 static DEFINE_SPINLOCK(icmp6_dst_lock);
1365 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1366 struct flowi6 *fl6)
1368 struct dst_entry *dst;
1369 struct rt6_info *rt;
1370 struct inet6_dev *idev = in6_dev_get(dev);
1371 struct net *net = dev_net(dev);
1373 if (unlikely(!idev))
1374 return ERR_PTR(-ENODEV);
1376 rt = ip6_dst_alloc(net, dev, 0, NULL);
1377 if (unlikely(!rt)) {
1378 in6_dev_put(idev);
1379 dst = ERR_PTR(-ENOMEM);
1380 goto out;
1383 rt->dst.flags |= DST_HOST;
1384 rt->dst.output = ip6_output;
1385 atomic_set(&rt->dst.__refcnt, 1);
1386 rt->rt6i_gateway = fl6->daddr;
1387 rt->rt6i_dst.addr = fl6->daddr;
1388 rt->rt6i_dst.plen = 128;
1389 rt->rt6i_idev = idev;
1390 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1392 spin_lock_bh(&icmp6_dst_lock);
1393 rt->dst.next = icmp6_dst_gc_list;
1394 icmp6_dst_gc_list = &rt->dst;
1395 spin_unlock_bh(&icmp6_dst_lock);
1397 fib6_force_start_gc(net);
1399 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1401 out:
1402 return dst;
1405 int icmp6_dst_gc(void)
1407 struct dst_entry *dst, **pprev;
1408 int more = 0;
1410 spin_lock_bh(&icmp6_dst_lock);
1411 pprev = &icmp6_dst_gc_list;
1413 while ((dst = *pprev) != NULL) {
1414 if (!atomic_read(&dst->__refcnt)) {
1415 *pprev = dst->next;
1416 dst_free(dst);
1417 } else {
1418 pprev = &dst->next;
1419 ++more;
1423 spin_unlock_bh(&icmp6_dst_lock);
1425 return more;
1428 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1429 void *arg)
1431 struct dst_entry *dst, **pprev;
1433 spin_lock_bh(&icmp6_dst_lock);
1434 pprev = &icmp6_dst_gc_list;
1435 while ((dst = *pprev) != NULL) {
1436 struct rt6_info *rt = (struct rt6_info *) dst;
1437 if (func(rt, arg)) {
1438 *pprev = dst->next;
1439 dst_free(dst);
1440 } else {
1441 pprev = &dst->next;
1444 spin_unlock_bh(&icmp6_dst_lock);
1447 static int ip6_dst_gc(struct dst_ops *ops)
1449 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1450 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1451 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1452 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1453 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1454 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1455 int entries;
1457 entries = dst_entries_get_fast(ops);
1458 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
1459 entries <= rt_max_size)
1460 goto out;
1462 net->ipv6.ip6_rt_gc_expire++;
1463 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
1464 entries = dst_entries_get_slow(ops);
1465 if (entries < ops->gc_thresh)
1466 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1467 out:
1468 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1469 return entries > rt_max_size;
1472 static int ip6_convert_metrics(struct mx6_config *mxc,
1473 const struct fib6_config *cfg)
1475 struct nlattr *nla;
1476 int remaining;
1477 u32 *mp;
1479 if (!cfg->fc_mx)
1480 return 0;
1482 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1483 if (unlikely(!mp))
1484 return -ENOMEM;
1486 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1487 int type = nla_type(nla);
1489 if (type) {
1490 u32 val;
1492 if (unlikely(type > RTAX_MAX))
1493 goto err;
1494 if (type == RTAX_CC_ALGO) {
1495 char tmp[TCP_CA_NAME_MAX];
1497 nla_strlcpy(tmp, nla, sizeof(tmp));
1498 val = tcp_ca_get_key_by_name(tmp);
1499 if (val == TCP_CA_UNSPEC)
1500 goto err;
1501 } else {
1502 val = nla_get_u32(nla);
1505 mp[type - 1] = val;
1506 __set_bit(type - 1, mxc->mx_valid);
1510 mxc->mx = mp;
1512 return 0;
1513 err:
1514 kfree(mp);
1515 return -EINVAL;
1518 int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
1520 int err;
1521 struct net *net = cfg->fc_nlinfo.nl_net;
1522 struct rt6_info *rt = NULL;
1523 struct net_device *dev = NULL;
1524 struct inet6_dev *idev = NULL;
1525 struct fib6_table *table;
1526 int addr_type;
1528 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1529 return -EINVAL;
1530 #ifndef CONFIG_IPV6_SUBTREES
1531 if (cfg->fc_src_len)
1532 return -EINVAL;
1533 #endif
1534 if (cfg->fc_ifindex) {
1535 err = -ENODEV;
1536 dev = dev_get_by_index(net, cfg->fc_ifindex);
1537 if (!dev)
1538 goto out;
1539 idev = in6_dev_get(dev);
1540 if (!idev)
1541 goto out;
1544 if (cfg->fc_metric == 0)
1545 cfg->fc_metric = IP6_RT_PRIO_USER;
1547 err = -ENOBUFS;
1548 if (cfg->fc_nlinfo.nlh &&
1549 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1550 table = fib6_get_table(net, cfg->fc_table);
1551 if (!table) {
1552 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1553 table = fib6_new_table(net, cfg->fc_table);
1555 } else {
1556 table = fib6_new_table(net, cfg->fc_table);
1559 if (!table)
1560 goto out;
1562 rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
1564 if (!rt) {
1565 err = -ENOMEM;
1566 goto out;
1569 if (cfg->fc_flags & RTF_EXPIRES)
1570 rt6_set_expires(rt, jiffies +
1571 clock_t_to_jiffies(cfg->fc_expires));
1572 else
1573 rt6_clean_expires(rt);
1575 if (cfg->fc_protocol == RTPROT_UNSPEC)
1576 cfg->fc_protocol = RTPROT_BOOT;
1577 rt->rt6i_protocol = cfg->fc_protocol;
1579 addr_type = ipv6_addr_type(&cfg->fc_dst);
1581 if (addr_type & IPV6_ADDR_MULTICAST)
1582 rt->dst.input = ip6_mc_input;
1583 else if (cfg->fc_flags & RTF_LOCAL)
1584 rt->dst.input = ip6_input;
1585 else
1586 rt->dst.input = ip6_forward;
1588 rt->dst.output = ip6_output;
1590 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1591 rt->rt6i_dst.plen = cfg->fc_dst_len;
1592 if (rt->rt6i_dst.plen == 128) {
1593 rt->dst.flags |= DST_HOST;
1594 dst_metrics_set_force_overwrite(&rt->dst);
1597 #ifdef CONFIG_IPV6_SUBTREES
1598 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1599 rt->rt6i_src.plen = cfg->fc_src_len;
1600 #endif
1602 rt->rt6i_metric = cfg->fc_metric;
1604 /* We cannot add true routes via loopback here,
1605 they would result in kernel looping; promote them to reject routes
1607 if ((cfg->fc_flags & RTF_REJECT) ||
1608 (dev && (dev->flags & IFF_LOOPBACK) &&
1609 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1610 !(cfg->fc_flags & RTF_LOCAL))) {
1611 /* hold loopback dev/idev if we haven't done so. */
1612 if (dev != net->loopback_dev) {
1613 if (dev) {
1614 dev_put(dev);
1615 in6_dev_put(idev);
1617 dev = net->loopback_dev;
1618 dev_hold(dev);
1619 idev = in6_dev_get(dev);
1620 if (!idev) {
1621 err = -ENODEV;
1622 goto out;
1625 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1626 switch (cfg->fc_type) {
1627 case RTN_BLACKHOLE:
1628 rt->dst.error = -EINVAL;
1629 rt->dst.output = dst_discard_sk;
1630 rt->dst.input = dst_discard;
1631 break;
1632 case RTN_PROHIBIT:
1633 rt->dst.error = -EACCES;
1634 rt->dst.output = ip6_pkt_prohibit_out;
1635 rt->dst.input = ip6_pkt_prohibit;
1636 break;
1637 case RTN_THROW:
1638 default:
1639 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1640 : -ENETUNREACH;
1641 rt->dst.output = ip6_pkt_discard_out;
1642 rt->dst.input = ip6_pkt_discard;
1643 break;
1645 goto install_route;
1648 if (cfg->fc_flags & RTF_GATEWAY) {
1649 const struct in6_addr *gw_addr;
1650 int gwa_type;
1652 gw_addr = &cfg->fc_gateway;
1653 rt->rt6i_gateway = *gw_addr;
1654 gwa_type = ipv6_addr_type(gw_addr);
1656 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1657 struct rt6_info *grt;
1659 /* IPv6 strictly inhibits using not link-local
1660 addresses as nexthop address.
1661 Otherwise, router will not able to send redirects.
1662 It is very good, but in some (rare!) circumstances
1663 (SIT, PtP, NBMA NOARP links) it is handy to allow
1664 some exceptions. --ANK
1666 err = -EINVAL;
1667 if (!(gwa_type & IPV6_ADDR_UNICAST))
1668 goto out;
1670 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1672 err = -EHOSTUNREACH;
1673 if (!grt)
1674 goto out;
1675 if (dev) {
1676 if (dev != grt->dst.dev) {
1677 ip6_rt_put(grt);
1678 goto out;
1680 } else {
1681 dev = grt->dst.dev;
1682 idev = grt->rt6i_idev;
1683 dev_hold(dev);
1684 in6_dev_hold(grt->rt6i_idev);
1686 if (!(grt->rt6i_flags & RTF_GATEWAY))
1687 err = 0;
1688 ip6_rt_put(grt);
1690 if (err)
1691 goto out;
1693 err = -EINVAL;
1694 if (!dev || (dev->flags & IFF_LOOPBACK))
1695 goto out;
1698 err = -ENODEV;
1699 if (!dev)
1700 goto out;
1702 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1703 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1704 err = -EINVAL;
1705 goto out;
1707 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1708 rt->rt6i_prefsrc.plen = 128;
1709 } else
1710 rt->rt6i_prefsrc.plen = 0;
1712 rt->rt6i_flags = cfg->fc_flags;
1714 install_route:
1715 rt->dst.dev = dev;
1716 rt->rt6i_idev = idev;
1717 rt->rt6i_table = table;
1719 cfg->fc_nlinfo.nl_net = dev_net(dev);
1721 *rt_ret = rt;
1723 return 0;
1724 out:
1725 if (dev)
1726 dev_put(dev);
1727 if (idev)
1728 in6_dev_put(idev);
1729 if (rt)
1730 dst_free(&rt->dst);
1732 *rt_ret = NULL;
1734 return err;
1737 int ip6_route_add(struct fib6_config *cfg)
1739 struct mx6_config mxc = { .mx = NULL, };
1740 struct rt6_info *rt = NULL;
1741 int err;
1743 err = ip6_route_info_create(cfg, &rt);
1744 if (err)
1745 goto out;
1747 err = ip6_convert_metrics(&mxc, cfg);
1748 if (err)
1749 goto out;
1751 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
1753 kfree(mxc.mx);
1755 return err;
1756 out:
1757 if (rt)
1758 dst_free(&rt->dst);
1760 return err;
1763 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1765 int err;
1766 struct fib6_table *table;
1767 struct net *net = dev_net(rt->dst.dev);
1769 if (rt == net->ipv6.ip6_null_entry) {
1770 err = -ENOENT;
1771 goto out;
1774 table = rt->rt6i_table;
1775 write_lock_bh(&table->tb6_lock);
1776 err = fib6_del(rt, info);
1777 write_unlock_bh(&table->tb6_lock);
1779 out:
1780 ip6_rt_put(rt);
1781 return err;
1784 int ip6_del_rt(struct rt6_info *rt)
1786 struct nl_info info = {
1787 .nl_net = dev_net(rt->dst.dev),
1789 return __ip6_del_rt(rt, &info);
1792 static int ip6_route_del(struct fib6_config *cfg)
1794 struct fib6_table *table;
1795 struct fib6_node *fn;
1796 struct rt6_info *rt;
1797 int err = -ESRCH;
1799 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1800 if (!table)
1801 return err;
1803 read_lock_bh(&table->tb6_lock);
1805 fn = fib6_locate(&table->tb6_root,
1806 &cfg->fc_dst, cfg->fc_dst_len,
1807 &cfg->fc_src, cfg->fc_src_len);
1809 if (fn) {
1810 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1811 if (cfg->fc_ifindex &&
1812 (!rt->dst.dev ||
1813 rt->dst.dev->ifindex != cfg->fc_ifindex))
1814 continue;
1815 if (cfg->fc_flags & RTF_GATEWAY &&
1816 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1817 continue;
1818 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1819 continue;
1820 dst_hold(&rt->dst);
1821 read_unlock_bh(&table->tb6_lock);
1823 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1826 read_unlock_bh(&table->tb6_lock);
1828 return err;
1831 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1833 struct net *net = dev_net(skb->dev);
1834 struct netevent_redirect netevent;
1835 struct rt6_info *rt, *nrt = NULL;
1836 struct ndisc_options ndopts;
1837 struct inet6_dev *in6_dev;
1838 struct neighbour *neigh;
1839 struct rd_msg *msg;
1840 int optlen, on_link;
1841 u8 *lladdr;
1843 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1844 optlen -= sizeof(*msg);
1846 if (optlen < 0) {
1847 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1848 return;
1851 msg = (struct rd_msg *)icmp6_hdr(skb);
1853 if (ipv6_addr_is_multicast(&msg->dest)) {
1854 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1855 return;
1858 on_link = 0;
1859 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1860 on_link = 1;
1861 } else if (ipv6_addr_type(&msg->target) !=
1862 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1863 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1864 return;
1867 in6_dev = __in6_dev_get(skb->dev);
1868 if (!in6_dev)
1869 return;
1870 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1871 return;
1873 /* RFC2461 8.1:
1874 * The IP source address of the Redirect MUST be the same as the current
1875 * first-hop router for the specified ICMP Destination Address.
1878 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1879 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1880 return;
1883 lladdr = NULL;
1884 if (ndopts.nd_opts_tgt_lladdr) {
1885 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1886 skb->dev);
1887 if (!lladdr) {
1888 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1889 return;
1893 rt = (struct rt6_info *) dst;
1894 if (rt == net->ipv6.ip6_null_entry) {
1895 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1896 return;
1899 /* Redirect received -> path was valid.
1900 * Look, redirects are sent only in response to data packets,
1901 * so that this nexthop apparently is reachable. --ANK
1903 dst_confirm(&rt->dst);
1905 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1906 if (!neigh)
1907 return;
1910 * We have finally decided to accept it.
1913 neigh_update(neigh, lladdr, NUD_STALE,
1914 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1915 NEIGH_UPDATE_F_OVERRIDE|
1916 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1917 NEIGH_UPDATE_F_ISROUTER))
1920 nrt = ip6_rt_copy(rt, &msg->dest);
1921 if (!nrt)
1922 goto out;
1924 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1925 if (on_link)
1926 nrt->rt6i_flags &= ~RTF_GATEWAY;
1928 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1930 if (ip6_ins_rt(nrt))
1931 goto out;
1933 netevent.old = &rt->dst;
1934 netevent.new = &nrt->dst;
1935 netevent.daddr = &msg->dest;
1936 netevent.neigh = neigh;
1937 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1939 if (rt->rt6i_flags & RTF_CACHE) {
1940 rt = (struct rt6_info *) dst_clone(&rt->dst);
1941 ip6_del_rt(rt);
1944 out:
1945 neigh_release(neigh);
1949 * Misc support functions
1952 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1953 const struct in6_addr *dest)
1955 struct net *net = dev_net(ort->dst.dev);
1956 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1957 ort->rt6i_table);
1959 if (rt) {
1960 rt->dst.input = ort->dst.input;
1961 rt->dst.output = ort->dst.output;
1962 rt->dst.flags |= DST_HOST;
1964 rt->rt6i_dst.addr = *dest;
1965 rt->rt6i_dst.plen = 128;
1966 dst_copy_metrics(&rt->dst, &ort->dst);
1967 rt->dst.error = ort->dst.error;
1968 rt->rt6i_idev = ort->rt6i_idev;
1969 if (rt->rt6i_idev)
1970 in6_dev_hold(rt->rt6i_idev);
1971 rt->dst.lastuse = jiffies;
1973 if (ort->rt6i_flags & RTF_GATEWAY)
1974 rt->rt6i_gateway = ort->rt6i_gateway;
1975 else
1976 rt->rt6i_gateway = *dest;
1977 rt->rt6i_flags = ort->rt6i_flags;
1978 rt6_set_from(rt, ort);
1979 rt->rt6i_metric = 0;
1981 #ifdef CONFIG_IPV6_SUBTREES
1982 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1983 #endif
1984 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1985 rt->rt6i_table = ort->rt6i_table;
1987 return rt;
1990 #ifdef CONFIG_IPV6_ROUTE_INFO
1991 static struct rt6_info *rt6_get_route_info(struct net *net,
1992 const struct in6_addr *prefix, int prefixlen,
1993 const struct in6_addr *gwaddr, int ifindex)
1995 struct fib6_node *fn;
1996 struct rt6_info *rt = NULL;
1997 struct fib6_table *table;
1999 table = fib6_get_table(net, RT6_TABLE_INFO);
2000 if (!table)
2001 return NULL;
2003 read_lock_bh(&table->tb6_lock);
2004 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2005 if (!fn)
2006 goto out;
2008 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2009 if (rt->dst.dev->ifindex != ifindex)
2010 continue;
2011 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2012 continue;
2013 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2014 continue;
2015 dst_hold(&rt->dst);
2016 break;
2018 out:
2019 read_unlock_bh(&table->tb6_lock);
2020 return rt;
2023 static struct rt6_info *rt6_add_route_info(struct net *net,
2024 const struct in6_addr *prefix, int prefixlen,
2025 const struct in6_addr *gwaddr, int ifindex,
2026 unsigned int pref)
2028 struct fib6_config cfg = {
2029 .fc_table = RT6_TABLE_INFO,
2030 .fc_metric = IP6_RT_PRIO_USER,
2031 .fc_ifindex = ifindex,
2032 .fc_dst_len = prefixlen,
2033 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2034 RTF_UP | RTF_PREF(pref),
2035 .fc_nlinfo.portid = 0,
2036 .fc_nlinfo.nlh = NULL,
2037 .fc_nlinfo.nl_net = net,
2040 cfg.fc_dst = *prefix;
2041 cfg.fc_gateway = *gwaddr;
2043 /* We should treat it as a default route if prefix length is 0. */
2044 if (!prefixlen)
2045 cfg.fc_flags |= RTF_DEFAULT;
2047 ip6_route_add(&cfg);
2049 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
2051 #endif
2053 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2055 struct rt6_info *rt;
2056 struct fib6_table *table;
2058 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
2059 if (!table)
2060 return NULL;
2062 read_lock_bh(&table->tb6_lock);
2063 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2064 if (dev == rt->dst.dev &&
2065 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2066 ipv6_addr_equal(&rt->rt6i_gateway, addr))
2067 break;
2069 if (rt)
2070 dst_hold(&rt->dst);
2071 read_unlock_bh(&table->tb6_lock);
2072 return rt;
2075 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2076 struct net_device *dev,
2077 unsigned int pref)
2079 struct fib6_config cfg = {
2080 .fc_table = RT6_TABLE_DFLT,
2081 .fc_metric = IP6_RT_PRIO_USER,
2082 .fc_ifindex = dev->ifindex,
2083 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2084 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2085 .fc_nlinfo.portid = 0,
2086 .fc_nlinfo.nlh = NULL,
2087 .fc_nlinfo.nl_net = dev_net(dev),
2090 cfg.fc_gateway = *gwaddr;
2092 ip6_route_add(&cfg);
2094 return rt6_get_dflt_router(gwaddr, dev);
2097 void rt6_purge_dflt_routers(struct net *net)
2099 struct rt6_info *rt;
2100 struct fib6_table *table;
2102 /* NOTE: Keep consistent with rt6_get_dflt_router */
2103 table = fib6_get_table(net, RT6_TABLE_DFLT);
2104 if (!table)
2105 return;
2107 restart:
2108 read_lock_bh(&table->tb6_lock);
2109 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2110 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2111 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2112 dst_hold(&rt->dst);
2113 read_unlock_bh(&table->tb6_lock);
2114 ip6_del_rt(rt);
2115 goto restart;
2118 read_unlock_bh(&table->tb6_lock);
2121 static void rtmsg_to_fib6_config(struct net *net,
2122 struct in6_rtmsg *rtmsg,
2123 struct fib6_config *cfg)
2125 memset(cfg, 0, sizeof(*cfg));
2127 cfg->fc_table = RT6_TABLE_MAIN;
2128 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2129 cfg->fc_metric = rtmsg->rtmsg_metric;
2130 cfg->fc_expires = rtmsg->rtmsg_info;
2131 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2132 cfg->fc_src_len = rtmsg->rtmsg_src_len;
2133 cfg->fc_flags = rtmsg->rtmsg_flags;
2135 cfg->fc_nlinfo.nl_net = net;
2137 cfg->fc_dst = rtmsg->rtmsg_dst;
2138 cfg->fc_src = rtmsg->rtmsg_src;
2139 cfg->fc_gateway = rtmsg->rtmsg_gateway;
2142 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
2144 struct fib6_config cfg;
2145 struct in6_rtmsg rtmsg;
2146 int err;
2148 switch (cmd) {
2149 case SIOCADDRT: /* Add a route */
2150 case SIOCDELRT: /* Delete a route */
2151 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2152 return -EPERM;
2153 err = copy_from_user(&rtmsg, arg,
2154 sizeof(struct in6_rtmsg));
2155 if (err)
2156 return -EFAULT;
2158 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
2160 rtnl_lock();
2161 switch (cmd) {
2162 case SIOCADDRT:
2163 err = ip6_route_add(&cfg);
2164 break;
2165 case SIOCDELRT:
2166 err = ip6_route_del(&cfg);
2167 break;
2168 default:
2169 err = -EINVAL;
2171 rtnl_unlock();
2173 return err;
2176 return -EINVAL;
2180 * Drop the packet on the floor
2183 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2185 int type;
2186 struct dst_entry *dst = skb_dst(skb);
2187 switch (ipstats_mib_noroutes) {
2188 case IPSTATS_MIB_INNOROUTES:
2189 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2190 if (type == IPV6_ADDR_ANY) {
2191 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2192 IPSTATS_MIB_INADDRERRORS);
2193 break;
2195 /* FALLTHROUGH */
2196 case IPSTATS_MIB_OUTNOROUTES:
2197 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2198 ipstats_mib_noroutes);
2199 break;
2201 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2202 kfree_skb(skb);
2203 return 0;
2206 static int ip6_pkt_discard(struct sk_buff *skb)
2208 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2211 static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
2213 skb->dev = skb_dst(skb)->dev;
2214 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2217 static int ip6_pkt_prohibit(struct sk_buff *skb)
2219 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2222 static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
2224 skb->dev = skb_dst(skb)->dev;
2225 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2229 * Allocate a dst for local (unicast / anycast) address.
2232 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2233 const struct in6_addr *addr,
2234 bool anycast)
2236 struct net *net = dev_net(idev->dev);
2237 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2238 DST_NOCOUNT, NULL);
2239 if (!rt)
2240 return ERR_PTR(-ENOMEM);
2242 in6_dev_hold(idev);
2244 rt->dst.flags |= DST_HOST;
2245 rt->dst.input = ip6_input;
2246 rt->dst.output = ip6_output;
2247 rt->rt6i_idev = idev;
2249 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2250 if (anycast)
2251 rt->rt6i_flags |= RTF_ANYCAST;
2252 else
2253 rt->rt6i_flags |= RTF_LOCAL;
2255 rt->rt6i_gateway = *addr;
2256 rt->rt6i_dst.addr = *addr;
2257 rt->rt6i_dst.plen = 128;
2258 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2260 atomic_set(&rt->dst.__refcnt, 1);
2262 return rt;
2265 int ip6_route_get_saddr(struct net *net,
2266 struct rt6_info *rt,
2267 const struct in6_addr *daddr,
2268 unsigned int prefs,
2269 struct in6_addr *saddr)
2271 struct inet6_dev *idev =
2272 rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
2273 int err = 0;
2274 if (rt && rt->rt6i_prefsrc.plen)
2275 *saddr = rt->rt6i_prefsrc.addr;
2276 else
2277 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2278 daddr, prefs, saddr);
2279 return err;
2282 /* remove deleted ip from prefsrc entries */
2283 struct arg_dev_net_ip {
2284 struct net_device *dev;
2285 struct net *net;
2286 struct in6_addr *addr;
2289 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2291 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2292 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2293 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2295 if (((void *)rt->dst.dev == dev || !dev) &&
2296 rt != net->ipv6.ip6_null_entry &&
2297 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2298 /* remove prefsrc entry */
2299 rt->rt6i_prefsrc.plen = 0;
2301 return 0;
2304 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2306 struct net *net = dev_net(ifp->idev->dev);
2307 struct arg_dev_net_ip adni = {
2308 .dev = ifp->idev->dev,
2309 .net = net,
2310 .addr = &ifp->addr,
2312 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
2315 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
2316 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2318 /* Remove routers and update dst entries when gateway turn into host. */
2319 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
2321 struct in6_addr *gateway = (struct in6_addr *)arg;
2323 if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
2324 ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
2325 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
2326 return -1;
2328 return 0;
2331 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
2333 fib6_clean_all(net, fib6_clean_tohost, gateway);
2336 struct arg_dev_net {
2337 struct net_device *dev;
2338 struct net *net;
2341 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2343 const struct arg_dev_net *adn = arg;
2344 const struct net_device *dev = adn->dev;
2346 if ((rt->dst.dev == dev || !dev) &&
2347 rt != adn->net->ipv6.ip6_null_entry)
2348 return -1;
2350 return 0;
2353 void rt6_ifdown(struct net *net, struct net_device *dev)
2355 struct arg_dev_net adn = {
2356 .dev = dev,
2357 .net = net,
2360 fib6_clean_all(net, fib6_ifdown, &adn);
2361 icmp6_clean_all(fib6_ifdown, &adn);
2364 struct rt6_mtu_change_arg {
2365 struct net_device *dev;
2366 unsigned int mtu;
2369 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2371 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2372 struct inet6_dev *idev;
2374 /* In IPv6 pmtu discovery is not optional,
2375 so that RTAX_MTU lock cannot disable it.
2376 We still use this lock to block changes
2377 caused by addrconf/ndisc.
2380 idev = __in6_dev_get(arg->dev);
2381 if (!idev)
2382 return 0;
2384 /* For administrative MTU increase, there is no way to discover
2385 IPv6 PMTU increase, so PMTU increase should be updated here.
2386 Since RFC 1981 doesn't include administrative MTU increase
2387 update PMTU increase is a MUST. (i.e. jumbo frame)
2390 If new MTU is less than route PMTU, this new MTU will be the
2391 lowest MTU in the path, update the route PMTU to reflect PMTU
2392 decreases; if new MTU is greater than route PMTU, and the
2393 old MTU is the lowest MTU in the path, update the route PMTU
2394 to reflect the increase. In this case if the other nodes' MTU
2395 also have the lowest MTU, TOO BIG MESSAGE will be lead to
2396 PMTU discouvery.
2398 if (rt->dst.dev == arg->dev &&
2399 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2400 (dst_mtu(&rt->dst) >= arg->mtu ||
2401 (dst_mtu(&rt->dst) < arg->mtu &&
2402 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2403 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2405 return 0;
2408 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2410 struct rt6_mtu_change_arg arg = {
2411 .dev = dev,
2412 .mtu = mtu,
2415 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
2418 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2419 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2420 [RTA_OIF] = { .type = NLA_U32 },
2421 [RTA_IIF] = { .type = NLA_U32 },
2422 [RTA_PRIORITY] = { .type = NLA_U32 },
2423 [RTA_METRICS] = { .type = NLA_NESTED },
2424 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2425 [RTA_PREF] = { .type = NLA_U8 },
2428 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2429 struct fib6_config *cfg)
2431 struct rtmsg *rtm;
2432 struct nlattr *tb[RTA_MAX+1];
2433 unsigned int pref;
2434 int err;
2436 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2437 if (err < 0)
2438 goto errout;
2440 err = -EINVAL;
2441 rtm = nlmsg_data(nlh);
2442 memset(cfg, 0, sizeof(*cfg));
2444 cfg->fc_table = rtm->rtm_table;
2445 cfg->fc_dst_len = rtm->rtm_dst_len;
2446 cfg->fc_src_len = rtm->rtm_src_len;
2447 cfg->fc_flags = RTF_UP;
2448 cfg->fc_protocol = rtm->rtm_protocol;
2449 cfg->fc_type = rtm->rtm_type;
2451 if (rtm->rtm_type == RTN_UNREACHABLE ||
2452 rtm->rtm_type == RTN_BLACKHOLE ||
2453 rtm->rtm_type == RTN_PROHIBIT ||
2454 rtm->rtm_type == RTN_THROW)
2455 cfg->fc_flags |= RTF_REJECT;
2457 if (rtm->rtm_type == RTN_LOCAL)
2458 cfg->fc_flags |= RTF_LOCAL;
2460 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2461 cfg->fc_nlinfo.nlh = nlh;
2462 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2464 if (tb[RTA_GATEWAY]) {
2465 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
2466 cfg->fc_flags |= RTF_GATEWAY;
2469 if (tb[RTA_DST]) {
2470 int plen = (rtm->rtm_dst_len + 7) >> 3;
2472 if (nla_len(tb[RTA_DST]) < plen)
2473 goto errout;
2475 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2478 if (tb[RTA_SRC]) {
2479 int plen = (rtm->rtm_src_len + 7) >> 3;
2481 if (nla_len(tb[RTA_SRC]) < plen)
2482 goto errout;
2484 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2487 if (tb[RTA_PREFSRC])
2488 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
2490 if (tb[RTA_OIF])
2491 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2493 if (tb[RTA_PRIORITY])
2494 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2496 if (tb[RTA_METRICS]) {
2497 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2498 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2501 if (tb[RTA_TABLE])
2502 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2504 if (tb[RTA_MULTIPATH]) {
2505 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2506 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2509 if (tb[RTA_PREF]) {
2510 pref = nla_get_u8(tb[RTA_PREF]);
2511 if (pref != ICMPV6_ROUTER_PREF_LOW &&
2512 pref != ICMPV6_ROUTER_PREF_HIGH)
2513 pref = ICMPV6_ROUTER_PREF_MEDIUM;
2514 cfg->fc_flags |= RTF_PREF(pref);
2517 err = 0;
2518 errout:
2519 return err;
2522 struct rt6_nh {
2523 struct rt6_info *rt6_info;
2524 struct fib6_config r_cfg;
2525 struct mx6_config mxc;
2526 struct list_head next;
2529 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
2531 struct rt6_nh *nh;
2533 list_for_each_entry(nh, rt6_nh_list, next) {
2534 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
2535 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
2536 nh->r_cfg.fc_ifindex);
2540 static int ip6_route_info_append(struct list_head *rt6_nh_list,
2541 struct rt6_info *rt, struct fib6_config *r_cfg)
2543 struct rt6_nh *nh;
2544 struct rt6_info *rtnh;
2545 int err = -EEXIST;
2547 list_for_each_entry(nh, rt6_nh_list, next) {
2548 /* check if rt6_info already exists */
2549 rtnh = nh->rt6_info;
2551 if (rtnh->dst.dev == rt->dst.dev &&
2552 rtnh->rt6i_idev == rt->rt6i_idev &&
2553 ipv6_addr_equal(&rtnh->rt6i_gateway,
2554 &rt->rt6i_gateway))
2555 return err;
2558 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
2559 if (!nh)
2560 return -ENOMEM;
2561 nh->rt6_info = rt;
2562 err = ip6_convert_metrics(&nh->mxc, r_cfg);
2563 if (err) {
2564 kfree(nh);
2565 return err;
2567 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
2568 list_add_tail(&nh->next, rt6_nh_list);
2570 return 0;
2573 static int ip6_route_multipath_add(struct fib6_config *cfg)
2575 struct fib6_config r_cfg;
2576 struct rtnexthop *rtnh;
2577 struct rt6_info *rt;
2578 struct rt6_nh *err_nh;
2579 struct rt6_nh *nh, *nh_safe;
2580 int remaining;
2581 int attrlen;
2582 int err = 1;
2583 int nhn = 0;
2584 int replace = (cfg->fc_nlinfo.nlh &&
2585 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
2586 LIST_HEAD(rt6_nh_list);
2588 remaining = cfg->fc_mp_len;
2589 rtnh = (struct rtnexthop *)cfg->fc_mp;
2591 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
2592 * rt6_info structs per nexthop
2594 while (rtnh_ok(rtnh, remaining)) {
2595 memcpy(&r_cfg, cfg, sizeof(*cfg));
2596 if (rtnh->rtnh_ifindex)
2597 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2599 attrlen = rtnh_attrlen(rtnh);
2600 if (attrlen > 0) {
2601 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2603 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2604 if (nla) {
2605 r_cfg.fc_gateway = nla_get_in6_addr(nla);
2606 r_cfg.fc_flags |= RTF_GATEWAY;
2610 err = ip6_route_info_create(&r_cfg, &rt);
2611 if (err)
2612 goto cleanup;
2614 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
2615 if (err) {
2616 dst_free(&rt->dst);
2617 goto cleanup;
2620 rtnh = rtnh_next(rtnh, &remaining);
2623 err_nh = NULL;
2624 list_for_each_entry(nh, &rt6_nh_list, next) {
2625 err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
2626 /* nh->rt6_info is used or freed at this point, reset to NULL*/
2627 nh->rt6_info = NULL;
2628 if (err) {
2629 if (replace && nhn)
2630 ip6_print_replace_route_err(&rt6_nh_list);
2631 err_nh = nh;
2632 goto add_errout;
2635 /* Because each route is added like a single route we remove
2636 * these flags after the first nexthop: if there is a collision,
2637 * we have already failed to add the first nexthop:
2638 * fib6_add_rt2node() has rejected it; when replacing, old
2639 * nexthops have been replaced by first new, the rest should
2640 * be added to it.
2642 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
2643 NLM_F_REPLACE);
2644 nhn++;
2647 goto cleanup;
2649 add_errout:
2650 /* Delete routes that were already added */
2651 list_for_each_entry(nh, &rt6_nh_list, next) {
2652 if (err_nh == nh)
2653 break;
2654 ip6_route_del(&nh->r_cfg);
2657 cleanup:
2658 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
2659 if (nh->rt6_info)
2660 dst_free(&nh->rt6_info->dst);
2661 if (nh->mxc.mx)
2662 kfree(nh->mxc.mx);
2663 list_del(&nh->next);
2664 kfree(nh);
2667 return err;
2670 static int ip6_route_multipath_del(struct fib6_config *cfg)
2672 struct fib6_config r_cfg;
2673 struct rtnexthop *rtnh;
2674 int remaining;
2675 int attrlen;
2676 int err = 1, last_err = 0;
2678 remaining = cfg->fc_mp_len;
2679 rtnh = (struct rtnexthop *)cfg->fc_mp;
2681 /* Parse a Multipath Entry */
2682 while (rtnh_ok(rtnh, remaining)) {
2683 memcpy(&r_cfg, cfg, sizeof(*cfg));
2684 if (rtnh->rtnh_ifindex)
2685 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2687 attrlen = rtnh_attrlen(rtnh);
2688 if (attrlen > 0) {
2689 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2691 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2692 if (nla) {
2693 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2694 r_cfg.fc_flags |= RTF_GATEWAY;
2697 err = ip6_route_del(&r_cfg);
2698 if (err)
2699 last_err = err;
2701 rtnh = rtnh_next(rtnh, &remaining);
2704 return last_err;
2707 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2709 struct fib6_config cfg;
2710 int err;
2712 err = rtm_to_fib6_config(skb, nlh, &cfg);
2713 if (err < 0)
2714 return err;
2716 if (cfg.fc_mp)
2717 return ip6_route_multipath_del(&cfg);
2718 else
2719 return ip6_route_del(&cfg);
2722 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
2724 struct fib6_config cfg;
2725 int err;
2727 err = rtm_to_fib6_config(skb, nlh, &cfg);
2728 if (err < 0)
2729 return err;
2731 if (cfg.fc_mp)
2732 return ip6_route_multipath_add(&cfg);
2733 else
2734 return ip6_route_add(&cfg);
2737 static inline size_t rt6_nlmsg_size(void)
2739 return NLMSG_ALIGN(sizeof(struct rtmsg))
2740 + nla_total_size(16) /* RTA_SRC */
2741 + nla_total_size(16) /* RTA_DST */
2742 + nla_total_size(16) /* RTA_GATEWAY */
2743 + nla_total_size(16) /* RTA_PREFSRC */
2744 + nla_total_size(4) /* RTA_TABLE */
2745 + nla_total_size(4) /* RTA_IIF */
2746 + nla_total_size(4) /* RTA_OIF */
2747 + nla_total_size(4) /* RTA_PRIORITY */
2748 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2749 + nla_total_size(sizeof(struct rta_cacheinfo))
2750 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
2751 + nla_total_size(1); /* RTA_PREF */
2754 static int rt6_fill_node(struct net *net,
2755 struct sk_buff *skb, struct rt6_info *rt,
2756 struct in6_addr *dst, struct in6_addr *src,
2757 int iif, int type, u32 portid, u32 seq,
2758 int prefix, int nowait, unsigned int flags)
2760 struct rtmsg *rtm;
2761 struct nlmsghdr *nlh;
2762 long expires;
2763 u32 table;
2765 if (prefix) { /* user wants prefix routes only */
2766 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2767 /* success since this is not a prefix route */
2768 return 1;
2772 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2773 if (!nlh)
2774 return -EMSGSIZE;
2776 rtm = nlmsg_data(nlh);
2777 rtm->rtm_family = AF_INET6;
2778 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2779 rtm->rtm_src_len = rt->rt6i_src.plen;
2780 rtm->rtm_tos = 0;
2781 if (rt->rt6i_table)
2782 table = rt->rt6i_table->tb6_id;
2783 else
2784 table = RT6_TABLE_UNSPEC;
2785 rtm->rtm_table = table;
2786 if (nla_put_u32(skb, RTA_TABLE, table))
2787 goto nla_put_failure;
2788 if (rt->rt6i_flags & RTF_REJECT) {
2789 switch (rt->dst.error) {
2790 case -EINVAL:
2791 rtm->rtm_type = RTN_BLACKHOLE;
2792 break;
2793 case -EACCES:
2794 rtm->rtm_type = RTN_PROHIBIT;
2795 break;
2796 case -EAGAIN:
2797 rtm->rtm_type = RTN_THROW;
2798 break;
2799 default:
2800 rtm->rtm_type = RTN_UNREACHABLE;
2801 break;
2804 else if (rt->rt6i_flags & RTF_LOCAL)
2805 rtm->rtm_type = RTN_LOCAL;
2806 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2807 rtm->rtm_type = RTN_LOCAL;
2808 else
2809 rtm->rtm_type = RTN_UNICAST;
2810 rtm->rtm_flags = 0;
2811 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2812 rtm->rtm_protocol = rt->rt6i_protocol;
2813 if (rt->rt6i_flags & RTF_DYNAMIC)
2814 rtm->rtm_protocol = RTPROT_REDIRECT;
2815 else if (rt->rt6i_flags & RTF_ADDRCONF) {
2816 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2817 rtm->rtm_protocol = RTPROT_RA;
2818 else
2819 rtm->rtm_protocol = RTPROT_KERNEL;
2822 if (rt->rt6i_flags & RTF_CACHE)
2823 rtm->rtm_flags |= RTM_F_CLONED;
2825 if (dst) {
2826 if (nla_put_in6_addr(skb, RTA_DST, dst))
2827 goto nla_put_failure;
2828 rtm->rtm_dst_len = 128;
2829 } else if (rtm->rtm_dst_len)
2830 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
2831 goto nla_put_failure;
2832 #ifdef CONFIG_IPV6_SUBTREES
2833 if (src) {
2834 if (nla_put_in6_addr(skb, RTA_SRC, src))
2835 goto nla_put_failure;
2836 rtm->rtm_src_len = 128;
2837 } else if (rtm->rtm_src_len &&
2838 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
2839 goto nla_put_failure;
2840 #endif
2841 if (iif) {
2842 #ifdef CONFIG_IPV6_MROUTE
2843 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2844 int err = ip6mr_get_route(net, skb, rtm, nowait);
2845 if (err <= 0) {
2846 if (!nowait) {
2847 if (err == 0)
2848 return 0;
2849 goto nla_put_failure;
2850 } else {
2851 if (err == -EMSGSIZE)
2852 goto nla_put_failure;
2855 } else
2856 #endif
2857 if (nla_put_u32(skb, RTA_IIF, iif))
2858 goto nla_put_failure;
2859 } else if (dst) {
2860 struct in6_addr saddr_buf;
2861 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2862 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
2863 goto nla_put_failure;
2866 if (rt->rt6i_prefsrc.plen) {
2867 struct in6_addr saddr_buf;
2868 saddr_buf = rt->rt6i_prefsrc.addr;
2869 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
2870 goto nla_put_failure;
2873 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2874 goto nla_put_failure;
2876 if (rt->rt6i_flags & RTF_GATEWAY) {
2877 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
2878 goto nla_put_failure;
2881 if (rt->dst.dev &&
2882 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2883 goto nla_put_failure;
2884 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2885 goto nla_put_failure;
2887 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2889 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2890 goto nla_put_failure;
2892 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
2893 goto nla_put_failure;
2895 nlmsg_end(skb, nlh);
2896 return 0;
2898 nla_put_failure:
2899 nlmsg_cancel(skb, nlh);
2900 return -EMSGSIZE;
2903 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2905 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2906 int prefix;
2908 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2909 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2910 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2911 } else
2912 prefix = 0;
2914 return rt6_fill_node(arg->net,
2915 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2916 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2917 prefix, 0, NLM_F_MULTI);
2920 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2922 struct net *net = sock_net(in_skb->sk);
2923 struct nlattr *tb[RTA_MAX+1];
2924 struct rt6_info *rt;
2925 struct sk_buff *skb;
2926 struct rtmsg *rtm;
2927 struct flowi6 fl6;
2928 int err, iif = 0, oif = 0;
2930 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2931 if (err < 0)
2932 goto errout;
2934 err = -EINVAL;
2935 memset(&fl6, 0, sizeof(fl6));
2937 if (tb[RTA_SRC]) {
2938 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2939 goto errout;
2941 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2944 if (tb[RTA_DST]) {
2945 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2946 goto errout;
2948 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2951 if (tb[RTA_IIF])
2952 iif = nla_get_u32(tb[RTA_IIF]);
2954 if (tb[RTA_OIF])
2955 oif = nla_get_u32(tb[RTA_OIF]);
2957 if (tb[RTA_MARK])
2958 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
2960 if (iif) {
2961 struct net_device *dev;
2962 int flags = 0;
2964 dev = __dev_get_by_index(net, iif);
2965 if (!dev) {
2966 err = -ENODEV;
2967 goto errout;
2970 fl6.flowi6_iif = iif;
2972 if (!ipv6_addr_any(&fl6.saddr))
2973 flags |= RT6_LOOKUP_F_HAS_SADDR;
2975 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2976 flags);
2977 } else {
2978 fl6.flowi6_oif = oif;
2980 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2983 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2984 if (!skb) {
2985 ip6_rt_put(rt);
2986 err = -ENOBUFS;
2987 goto errout;
2990 /* Reserve room for dummy headers, this skb can pass
2991 through good chunk of routing engine.
2993 skb_reset_mac_header(skb);
2994 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2996 skb_dst_set(skb, &rt->dst);
2998 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2999 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
3000 nlh->nlmsg_seq, 0, 0, 0);
3001 if (err < 0) {
3002 kfree_skb(skb);
3003 goto errout;
3006 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3007 errout:
3008 return err;
3011 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
3013 struct sk_buff *skb;
3014 struct net *net = info->nl_net;
3015 u32 seq;
3016 int err;
3018 err = -ENOBUFS;
3019 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3021 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
3022 if (!skb)
3023 goto errout;
3025 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3026 event, info->portid, seq, 0, 0, 0);
3027 if (err < 0) {
3028 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3029 WARN_ON(err == -EMSGSIZE);
3030 kfree_skb(skb);
3031 goto errout;
3033 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3034 info->nlh, gfp_any());
3035 return;
3036 errout:
3037 if (err < 0)
3038 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
3041 static int ip6_route_dev_notify(struct notifier_block *this,
3042 unsigned long event, void *ptr)
3044 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3045 struct net *net = dev_net(dev);
3047 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
3048 net->ipv6.ip6_null_entry->dst.dev = dev;
3049 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3050 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3051 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
3052 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
3053 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3054 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3055 #endif
3058 return NOTIFY_OK;
3062 * /proc
3065 #ifdef CONFIG_PROC_FS
3067 static const struct file_operations ipv6_route_proc_fops = {
3068 .owner = THIS_MODULE,
3069 .open = ipv6_route_open,
3070 .read = seq_read,
3071 .llseek = seq_lseek,
3072 .release = seq_release_net,
3075 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
3077 struct net *net = (struct net *)seq->private;
3078 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
3079 net->ipv6.rt6_stats->fib_nodes,
3080 net->ipv6.rt6_stats->fib_route_nodes,
3081 net->ipv6.rt6_stats->fib_rt_alloc,
3082 net->ipv6.rt6_stats->fib_rt_entries,
3083 net->ipv6.rt6_stats->fib_rt_cache,
3084 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
3085 net->ipv6.rt6_stats->fib_discarded_routes);
3087 return 0;
3090 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
3092 return single_open_net(inode, file, rt6_stats_seq_show);
3095 static const struct file_operations rt6_stats_seq_fops = {
3096 .owner = THIS_MODULE,
3097 .open = rt6_stats_seq_open,
3098 .read = seq_read,
3099 .llseek = seq_lseek,
3100 .release = single_release_net,
3102 #endif /* CONFIG_PROC_FS */
3104 #ifdef CONFIG_SYSCTL
3106 static
3107 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
3108 void __user *buffer, size_t *lenp, loff_t *ppos)
3110 struct net *net;
3111 int delay;
3112 if (!write)
3113 return -EINVAL;
3115 net = (struct net *)ctl->extra1;
3116 delay = net->ipv6.sysctl.flush_delay;
3117 proc_dointvec(ctl, write, buffer, lenp, ppos);
3118 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
3119 return 0;
3122 struct ctl_table ipv6_route_table_template[] = {
3124 .procname = "flush",
3125 .data = &init_net.ipv6.sysctl.flush_delay,
3126 .maxlen = sizeof(int),
3127 .mode = 0200,
3128 .proc_handler = ipv6_sysctl_rtcache_flush
3131 .procname = "gc_thresh",
3132 .data = &ip6_dst_ops_template.gc_thresh,
3133 .maxlen = sizeof(int),
3134 .mode = 0644,
3135 .proc_handler = proc_dointvec,
3138 .procname = "max_size",
3139 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
3140 .maxlen = sizeof(int),
3141 .mode = 0644,
3142 .proc_handler = proc_dointvec,
3145 .procname = "gc_min_interval",
3146 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3147 .maxlen = sizeof(int),
3148 .mode = 0644,
3149 .proc_handler = proc_dointvec_jiffies,
3152 .procname = "gc_timeout",
3153 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
3154 .maxlen = sizeof(int),
3155 .mode = 0644,
3156 .proc_handler = proc_dointvec_jiffies,
3159 .procname = "gc_interval",
3160 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
3161 .maxlen = sizeof(int),
3162 .mode = 0644,
3163 .proc_handler = proc_dointvec_jiffies,
3166 .procname = "gc_elasticity",
3167 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
3168 .maxlen = sizeof(int),
3169 .mode = 0644,
3170 .proc_handler = proc_dointvec,
3173 .procname = "mtu_expires",
3174 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
3175 .maxlen = sizeof(int),
3176 .mode = 0644,
3177 .proc_handler = proc_dointvec_jiffies,
3180 .procname = "min_adv_mss",
3181 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
3182 .maxlen = sizeof(int),
3183 .mode = 0644,
3184 .proc_handler = proc_dointvec,
3187 .procname = "gc_min_interval_ms",
3188 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
3189 .maxlen = sizeof(int),
3190 .mode = 0644,
3191 .proc_handler = proc_dointvec_ms_jiffies,
3196 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
3198 struct ctl_table *table;
3200 table = kmemdup(ipv6_route_table_template,
3201 sizeof(ipv6_route_table_template),
3202 GFP_KERNEL);
3204 if (table) {
3205 table[0].data = &net->ipv6.sysctl.flush_delay;
3206 table[0].extra1 = net;
3207 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
3208 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
3209 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3210 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
3211 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
3212 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
3213 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
3214 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
3215 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
3217 /* Don't export sysctls to unprivileged users */
3218 if (net->user_ns != &init_user_ns)
3219 table[0].procname = NULL;
3222 return table;
3224 #endif
3226 static int __net_init ip6_route_net_init(struct net *net)
3228 int ret = -ENOMEM;
3230 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
3231 sizeof(net->ipv6.ip6_dst_ops));
3233 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
3234 goto out_ip6_dst_ops;
3236 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
3237 sizeof(*net->ipv6.ip6_null_entry),
3238 GFP_KERNEL);
3239 if (!net->ipv6.ip6_null_entry)
3240 goto out_ip6_dst_entries;
3241 net->ipv6.ip6_null_entry->dst.path =
3242 (struct dst_entry *)net->ipv6.ip6_null_entry;
3243 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3244 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
3245 ip6_template_metrics, true);
3247 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3248 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
3249 sizeof(*net->ipv6.ip6_prohibit_entry),
3250 GFP_KERNEL);
3251 if (!net->ipv6.ip6_prohibit_entry)
3252 goto out_ip6_null_entry;
3253 net->ipv6.ip6_prohibit_entry->dst.path =
3254 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
3255 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3256 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
3257 ip6_template_metrics, true);
3259 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
3260 sizeof(*net->ipv6.ip6_blk_hole_entry),
3261 GFP_KERNEL);
3262 if (!net->ipv6.ip6_blk_hole_entry)
3263 goto out_ip6_prohibit_entry;
3264 net->ipv6.ip6_blk_hole_entry->dst.path =
3265 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
3266 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
3267 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
3268 ip6_template_metrics, true);
3269 #endif
3271 net->ipv6.sysctl.flush_delay = 0;
3272 net->ipv6.sysctl.ip6_rt_max_size = 4096;
3273 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
3274 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
3275 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
3276 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
3277 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
3278 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
3280 net->ipv6.ip6_rt_gc_expire = 30*HZ;
3282 ret = 0;
3283 out:
3284 return ret;
3286 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3287 out_ip6_prohibit_entry:
3288 kfree(net->ipv6.ip6_prohibit_entry);
3289 out_ip6_null_entry:
3290 kfree(net->ipv6.ip6_null_entry);
3291 #endif
3292 out_ip6_dst_entries:
3293 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3294 out_ip6_dst_ops:
3295 goto out;
3298 static void __net_exit ip6_route_net_exit(struct net *net)
3300 kfree(net->ipv6.ip6_null_entry);
3301 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3302 kfree(net->ipv6.ip6_prohibit_entry);
3303 kfree(net->ipv6.ip6_blk_hole_entry);
3304 #endif
3305 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
3308 static int __net_init ip6_route_net_init_late(struct net *net)
3310 #ifdef CONFIG_PROC_FS
3311 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3312 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3313 #endif
3314 return 0;
3317 static void __net_exit ip6_route_net_exit_late(struct net *net)
3319 #ifdef CONFIG_PROC_FS
3320 remove_proc_entry("ipv6_route", net->proc_net);
3321 remove_proc_entry("rt6_stats", net->proc_net);
3322 #endif
3325 static struct pernet_operations ip6_route_net_ops = {
3326 .init = ip6_route_net_init,
3327 .exit = ip6_route_net_exit,
3330 static int __net_init ipv6_inetpeer_init(struct net *net)
3332 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3334 if (!bp)
3335 return -ENOMEM;
3336 inet_peer_base_init(bp);
3337 net->ipv6.peers = bp;
3338 return 0;
3341 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3343 struct inet_peer_base *bp = net->ipv6.peers;
3345 net->ipv6.peers = NULL;
3346 inetpeer_invalidate_tree(bp);
3347 kfree(bp);
3350 static struct pernet_operations ipv6_inetpeer_ops = {
3351 .init = ipv6_inetpeer_init,
3352 .exit = ipv6_inetpeer_exit,
3355 static struct pernet_operations ip6_route_net_late_ops = {
3356 .init = ip6_route_net_init_late,
3357 .exit = ip6_route_net_exit_late,
3360 static struct notifier_block ip6_route_dev_notifier = {
3361 .notifier_call = ip6_route_dev_notify,
3362 .priority = 0,
3365 int __init ip6_route_init(void)
3367 int ret;
3369 ret = -ENOMEM;
3370 ip6_dst_ops_template.kmem_cachep =
3371 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3372 SLAB_HWCACHE_ALIGN, NULL);
3373 if (!ip6_dst_ops_template.kmem_cachep)
3374 goto out;
3376 ret = dst_entries_init(&ip6_dst_blackhole_ops);
3377 if (ret)
3378 goto out_kmem_cache;
3380 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3381 if (ret)
3382 goto out_dst_entries;
3384 ret = register_pernet_subsys(&ip6_route_net_ops);
3385 if (ret)
3386 goto out_register_inetpeer;
3388 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3390 /* Registering of the loopback is done before this portion of code,
3391 * the loopback reference in rt6_info will not be taken, do it
3392 * manually for init_net */
3393 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3394 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3395 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3396 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3397 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3398 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3399 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3400 #endif
3401 ret = fib6_init();
3402 if (ret)
3403 goto out_register_subsys;
3405 ret = xfrm6_init();
3406 if (ret)
3407 goto out_fib6_init;
3409 ret = fib6_rules_init();
3410 if (ret)
3411 goto xfrm6_init;
3413 ret = register_pernet_subsys(&ip6_route_net_late_ops);
3414 if (ret)
3415 goto fib6_rules_init;
3417 ret = -ENOBUFS;
3418 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3419 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3420 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3421 goto out_register_late_subsys;
3423 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3424 if (ret)
3425 goto out_register_late_subsys;
3427 out:
3428 return ret;
3430 out_register_late_subsys:
3431 unregister_pernet_subsys(&ip6_route_net_late_ops);
3432 fib6_rules_init:
3433 fib6_rules_cleanup();
3434 xfrm6_init:
3435 xfrm6_fini();
3436 out_fib6_init:
3437 fib6_gc_cleanup();
3438 out_register_subsys:
3439 unregister_pernet_subsys(&ip6_route_net_ops);
3440 out_register_inetpeer:
3441 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3442 out_dst_entries:
3443 dst_entries_destroy(&ip6_dst_blackhole_ops);
3444 out_kmem_cache:
3445 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3446 goto out;
3449 void ip6_route_cleanup(void)
3451 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3452 unregister_pernet_subsys(&ip6_route_net_late_ops);
3453 fib6_rules_cleanup();
3454 xfrm6_fini();
3455 fib6_gc_cleanup();
3456 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3457 unregister_pernet_subsys(&ip6_route_net_ops);
3458 dst_entries_destroy(&ip6_dst_blackhole_ops);
3459 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);