/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *
 *	Fixed routing subtrees.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <linux/rtnetlink.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <asm/uaccess.h>
#include <linux/sysctl.h>
/* Set to 3 to get tracing. */
#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
#define RDBG(x) printk x
#define RT6_TRACE(x...) printk(KERN_DEBUG x)
#else
#define RDBG(x)
#define RT6_TRACE(x...) do { ; } while (0)
#endif
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_default_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex,
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex);

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
	struct rt6_info *rt = (struct rt6_info *) dst;
	struct inet_peer *peer;

		rt6_bind_peer(rt, 1);

	peer = rt->rt6i_peer;
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
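/*
 * Note on the copy-on-write metrics above: a route's metrics start out
 * pointing at shared, read-only storage.  On the first write the route is
 * bound to an inet_peer (rt6_bind_peer()), the writable per-peer metrics
 * array is seeded from the old read-only values with memcpy(), and
 * dst->_metrics is switched over with cmpxchg() so concurrent writers
 * agree on a single writable copy.
 */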
static struct dst_ops ip6_dst_ops_template = {
	.protocol = cpu_to_be16(ETH_P_IPV6),
	.check = ip6_dst_check,
	.default_advmss = ip6_default_advmss,
	.default_mtu = ip6_default_mtu,
	.cow_metrics = ipv6_cow_metrics,
	.destroy = ip6_dst_destroy,
	.ifdown = ip6_dst_ifdown,
	.negative_advice = ip6_negative_advice,
	.link_failure = ip6_link_failure,
	.update_pmtu = ip6_rt_update_pmtu,
	.local_out = __ip6_local_out,

static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)

static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,

static struct dst_ops ip6_dst_blackhole_ops = {
	.protocol = cpu_to_be16(ETH_P_IPV6),
	.destroy = ip6_dst_destroy,
	.check = ip6_dst_check,
	.default_mtu = ip6_blackhole_default_mtu,
	.default_advmss = ip6_default_advmss,
	.update_pmtu = ip6_rt_blackhole_update_pmtu,
	.cow_metrics = ip6_rt_blackhole_cow_metrics,

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 255,

static struct rt6_info ip6_null_entry_template = {
	.__refcnt = ATOMIC_INIT(1),
	.error = -ENETUNREACH,
	.input = ip6_pkt_discard,
	.output = ip6_pkt_discard_out,
	.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol = RTPROT_KERNEL,
	.rt6i_metric = ~(u32) 0,
	.rt6i_ref = ATOMIC_INIT(1),

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct sk_buff *skb);

static struct rt6_info ip6_prohibit_entry_template = {
	.__refcnt = ATOMIC_INIT(1),
	.input = ip6_pkt_prohibit,
	.output = ip6_pkt_prohibit_out,
	.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol = RTPROT_KERNEL,
	.rt6i_metric = ~(u32) 0,
	.rt6i_ref = ATOMIC_INIT(1),

static struct rt6_info ip6_blk_hole_entry_template = {
	.__refcnt = ATOMIC_INIT(1),
	.input = dst_discard,
	.output = dst_discard,
	.rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol = RTPROT_KERNEL,
	.rt6i_metric = ~(u32) 0,
	.rt6i_ref = ATOMIC_INIT(1),

/* allocate dst with ip6_dst_ops */
static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
					     struct net_device *dev,
	struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);

	memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));

static void ip6_dst_destroy(struct dst_entry *dst)
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct inet_peer *peer = rt->rt6i_peer;

		rt->rt6i_idev = NULL;
		rt->rt6i_peer = NULL;

static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);

static u32 rt6_peer_genid(void)
	return atomic_read(&__rt6_peer_genid);
void rt6_bind_peer(struct rt6_info *rt, int create)
	struct inet_peer *peer;

	peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
	if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)

		rt->rt6i_peer_genid = rt6_peer_genid();

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
		struct inet6_dev *loopback_idev =
			in6_dev_get(loopback_dev);
		if (loopback_idev != NULL) {
			rt->rt6i_idev = loopback_idev;

static __inline__ int rt6_check_expired(const struct rt6_info *rt)
	return (rt->rt6i_flags & RTF_EXPIRES) &&
		time_after(jiffies, rt->rt6i_expires);

static inline int rt6_need_strict(const struct in6_addr *daddr)
	return ipv6_addr_type(daddr) &
		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
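/*
 * rt6_need_strict(): multicast, link-local and loopback destinations are
 * only meaningful relative to a specific interface, so lookups for them
 * must honour the requested device (RT6_LOOKUP_F_IFACE) rather than fall
 * back to an arbitrary route.
 */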
/*
 *	Route lookup. Any table->tb6_lock is implied.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
						const struct in6_addr *saddr,
	struct rt6_info *local = NULL;
	struct rt6_info *sprt;

	if (!oif && ipv6_addr_any(saddr))

	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
		struct net_device *dev = sprt->rt6i_dev;

			if (dev->ifindex == oif)
			if (dev->flags & IFF_LOOPBACK) {
				if (sprt->rt6i_idev == NULL ||
				    sprt->rt6i_idev->dev->ifindex != oif) {
					if (flags & RT6_LOOKUP_F_IFACE && oif)
					if (local && (!oif ||
						      local->rt6i_idev->dev->ifindex == oif))
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))

	if (flags & RT6_LOOKUP_F_IFACE)
		return net->ipv6.ip6_null_entry;

#ifdef CONFIG_IPV6_ROUTER_PREF
static void rt6_probe(struct rt6_info *rt)
	struct neighbour *neigh;
	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
	if (!neigh || (neigh->nud_state & NUD_VALID))
	read_lock_bh(&neigh->lock);
	if (!(neigh->nud_state & NUD_VALID) &&
	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
		struct in6_addr mcaddr;
		struct in6_addr *target;

		neigh->updated = jiffies;
		read_unlock_bh(&neigh->lock);

		target = (struct in6_addr *)&neigh->primary_key;
		addrconf_addr_solict_mult(target, &mcaddr);
		ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
		read_unlock_bh(&neigh->lock);

static inline void rt6_probe(struct rt6_info *rt)

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
	struct net_device *dev = rt->rt6i_dev;
	if (!oif || dev->ifindex == oif)
	if ((dev->flags & IFF_LOOPBACK) &&
	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)

static inline int rt6_check_neigh(struct rt6_info *rt)
	struct neighbour *neigh;

	neigh = dst_get_neighbour(&rt->dst);
	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
	read_lock_bh(&neigh->lock);
	if (neigh->nud_state & NUD_VALID)
#ifdef CONFIG_IPV6_ROUTER_PREF
	else if (neigh->nud_state & NUD_FAILED)
	read_unlock_bh(&neigh->lock);

static int rt6_score_route(struct rt6_info *rt, int oif,
	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
	n = rt6_check_neigh(rt);
	if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
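/*
 * Score composition visible above: rt6_check_dev() supplies the low bits
 * (does the route leave via the requested interface?), the RA router
 * preference is OR-ed in shifted left by two when CONFIG_IPV6_ROUTER_PREF
 * is enabled, and a route whose next hop fails rt6_check_neigh() is
 * disqualified when the caller asked for RT6_LOOKUP_F_REACHABLE.
 * find_match() below then keeps the best-scoring candidate (tracked via
 * *mpri).
 */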
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match)
	if (rt6_check_expired(rt))
	m = rt6_score_route(rt, oif, strict);
		if (strict & RT6_LOOKUP_F_REACHABLE)
	} else if (strict & RT6_LOOKUP_F_REACHABLE) {

static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict)
	struct rt6_info *rt, *match;

	for (rt = rr_head; rt && rt->rt6i_metric == metric;
	     rt = rt->dst.rt6_next)
		match = find_match(rt, oif, strict, &mpri, match);
	for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
	     rt = rt->dst.rt6_next)
		match = find_match(rt, oif, strict, &mpri, match);

static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
	struct rt6_info *match, *rt0;

	RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
		  __func__, fn->leaf, oif);

		fn->rr_ptr = rt0 = fn->leaf;

	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);

	    (strict & RT6_LOOKUP_F_REACHABLE)) {
		struct rt6_info *next = rt0->dst.rt6_next;

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)

	RT6_TRACE("%s() => %p\n",

	net = dev_net(rt0->rt6i_dev);
	return match ? match : net->ipv6.ip6_null_entry;
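/*
 * rt6_select() implements the default router selection described in the
 * file header (RFC 2461 6.3.6): among routes of equal metric it prefers a
 * (probably) reachable router, and when none qualifies it advances
 * fn->rr_ptr so successive lookups round-robin over the equal-metric
 * candidates instead of repeatedly picking one dead router.  Falls back to
 * the per-net ip6_null_entry when nothing matches at all.
 */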
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned long lifetime;

	if (len < sizeof(struct route_info)) {

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
	} else if (rinfo->prefix_len > 128) {
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
		prefix = &prefix_buf;

	rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,

	if (rt && !lifetime) {

		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,

		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

		if (!addrconf_finite_timeout(lifetime)) {
			rt->rt6i_flags &= ~RTF_EXPIRES;
			rt->rt6i_expires = jiffies + HZ * lifetime;
			rt->rt6i_flags |= RTF_EXPIRES;
		dst_release(&rt->dst);

#define BACKTRACK(__net, saddr) \
	if (rt == __net->ipv6.ip6_null_entry) { \
		struct fib6_node *pn; \
		if (fn->fn_flags & RTN_TL_ROOT) \
		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
		if (fn->fn_flags & RTN_RTINFO) \
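/*
 * BACKTRACK(): when a lookup lands on the null entry, walk back up to the
 * parent fib6 node (pn), re-doing the source-routing subtree lookup
 * (FIB6_SUBTREE) where one exists, until a node carrying real route
 * information (RTN_RTINFO) or the tree root (RTN_TL_ROOT) is reached.
 */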
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
	struct fib6_node *fn;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
	BACKTRACK(net, &fl6->saddr);
	dst_use(&rt->dst, jiffies);
	read_unlock_bh(&table->tb6_lock);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif, int strict)
	struct flowi6 fl6 = {
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
		return (struct rt6_info *) dst;

EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
   It takes a new route entry; if the addition fails for any reason, the
   route is freed.  In any case, if the caller does not hold it, it may
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
	struct fib6_table *table;

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info);
	write_unlock_bh(&table->tb6_lock);

int ip6_ins_rt(struct rt6_info *rt)
	struct nl_info info = {
		.nl_net = dev_net(rt->rt6i_dev),
	return __ip6_ins_rt(rt, &info);

static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr,
				      const struct in6_addr *saddr)
	rt = ip6_rt_copy(ort);
		struct neighbour *neigh;
		int attempts = !in_softirq();

		if (!(rt->rt6i_flags & RTF_GATEWAY)) {
			if (rt->rt6i_dst.plen != 128 &&
			    ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
				rt->rt6i_flags |= RTF_ANYCAST;
			ipv6_addr_copy(&rt->rt6i_gateway, daddr);

		ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
		rt->rt6i_dst.plen = 128;
		rt->rt6i_flags |= RTF_CACHE;
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
			rt->rt6i_src.plen = 128;

		neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
			struct net *net = dev_net(rt->rt6i_dev);
			int saved_rt_min_interval =
				net->ipv6.sysctl.ip6_rt_gc_min_interval;
			int saved_rt_elasticity =
				net->ipv6.sysctl.ip6_rt_gc_elasticity;

			if (attempts-- > 0) {
				net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
				net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;

				ip6_dst_gc(&net->ipv6.ip6_dst_ops);

				net->ipv6.sysctl.ip6_rt_gc_elasticity =
				net->ipv6.sysctl.ip6_rt_gc_min_interval =
					saved_rt_min_interval;

				    "ipv6: Neighbour table overflow.\n");
		dst_set_neighbour(&rt->dst, neigh);

static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_addr *daddr)
	struct rt6_info *rt = ip6_rt_copy(ort);
		ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
		rt->rt6i_dst.plen = 128;
		rt->rt6i_flags |= RTF_CACHE;
		rt->dst.flags |= DST_HOST;
		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));

static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
				      struct flowi6 *fl6, int flags)
	struct fib6_node *fn;
	struct rt6_info *rt, *nrt;
	int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;

	strict |= flags & RT6_LOOKUP_F_IFACE;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);

	rt = rt6_select(fn, oif, strict | reachable);

	BACKTRACK(net, &fl6->saddr);
	if (rt == net->ipv6.ip6_null_entry ||
	    rt->rt6i_flags & RTF_CACHE)

	read_unlock_bh(&table->tb6_lock);

	if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
	else if (!(rt->dst.flags & DST_HOST))
		nrt = rt6_alloc_clone(rt, &fl6->daddr);

	dst_release(&rt->dst);
	rt = nrt ? : net->ipv6.ip6_null_entry;

		err = ip6_ins_rt(nrt);

	/*
	 * Race condition! In the gap, when table->tb6_lock was
	 * released someone could insert this route.  Relookup.
	 */
	dst_release(&rt->dst);

	read_unlock_bh(&table->tb6_lock);

	rt->dst.lastuse = jiffies;
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);

void ip6_route_input(struct sk_buff *skb)
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,

	if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));

static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);

struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);

EXPORT_SYMBOL(ip6_route_output);

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
		memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));

		new->input = dst_discard;
		new->output = dst_discard;

		dst_copy_metrics(new, &ort->dst);
		rt->rt6i_idev = ort->rt6i_idev;
			in6_dev_hold(rt->rt6i_idev);
		rt->rt6i_expires = 0;

		ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
/*
 *	Destination cache support functions
 */

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
	rt = (struct rt6_info *) dst;

	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
		if (rt->rt6i_peer_genid != rt6_peer_genid()) {
				rt6_bind_peer(rt, 0);
			rt->rt6i_peer_genid = rt6_peer_genid();

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt->rt6i_flags & RTF_CACHE) {
		if (rt6_check_expired(rt)) {

static void ip6_link_failure(struct sk_buff *skb)
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt->rt6i_flags & RTF_CACHE) {
		dst_set_expires(&rt->dst, 0);
		rt->rt6i_flags |= RTF_EXPIRES;
	} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
		rt->rt6i_node->fn_sernum = -1;

static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
		rt6->rt6i_flags |= RTF_MODIFIED;
		if (mtu < IPV6_MIN_MTU) {
			u32 features = dst_metric(dst, RTAX_FEATURES);
			features |= RTAX_FEATURE_ALLFRAG;
			dst_metric_set(dst, RTAX_FEATURES, features);
		dst_metric_set(dst, RTAX_MTU, mtu);
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
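/*
 * Advertised MSS = path MTU minus the fixed IPv6 (40 byte) and TCP
 * (20 byte) headers, clamped below by the ip6_rt_min_advmss sysctl and
 * above by IPV6_MAXPLEN - sizeof(struct tcphdr).  For example, a 1500
 * byte MTU yields an advmss of 1440.
 */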
static unsigned int ip6_default_mtu(const struct dst_entry *dst)
	unsigned int mtu = IPV6_MIN_MTU;
	struct inet6_dev *idev;

	idev = __in6_dev_get(dst->dev);
		mtu = idev->cnf.mtu6;

static struct dst_entry *icmp6_dst_gc_list;
static DEFINE_SPINLOCK(icmp6_dst_lock);

struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct neighbour *neigh,
				  const struct in6_addr *addr)
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(idev == NULL))

	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
	if (unlikely(rt == NULL)) {

		neigh = ndisc_get_neigh(dev, addr);

	rt->rt6i_idev = idev;
	dst_set_neighbour(&rt->dst, neigh);
	atomic_set(&rt->dst.__refcnt, 1);
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
	rt->dst.output = ip6_output;

	spin_lock_bh(&icmp6_dst_lock);
	rt->dst.next = icmp6_dst_gc_list;
	icmp6_dst_gc_list = &rt->dst;
	spin_unlock_bh(&icmp6_dst_lock);

	fib6_force_start_gc(net);

int icmp6_dst_gc(void)
	struct dst_entry *dst, **pprev;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;

	while ((dst = *pprev) != NULL) {
		if (!atomic_read(&dst->__refcnt)) {

	spin_unlock_bh(&icmp6_dst_lock);

static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
	struct dst_entry *dst, **pprev;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;
	while ((dst = *pprev) != NULL) {
		struct rt6_info *rt = (struct rt6_info *) dst;
		if (func(rt, arg)) {

	spin_unlock_bh(&icmp6_dst_lock);
static int ip6_dst_gc(struct dst_ops *ops)
	unsigned long now = jiffies;
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, now) &&
	    entries <= rt_max_size)

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
	net->ipv6.ip6_rt_last_gc = now;
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
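/*
 * GC policy above: the run is skipped when the previous collection was less
 * than ip6_rt_gc_min_interval ago and the entry count is still within
 * ip6_rt_max_size.  Otherwise fib6_run_gc() is invoked with the adaptive
 * ip6_rt_gc_expire value, which is reset to half of ip6_rt_gc_timeout while
 * the table stays under gc_thresh and shifted down by ip6_rt_gc_elasticity
 * after each run.  The return value reports whether the table is still over
 * ip6_rt_max_size.
 */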
/* Clean host part of a prefix. Not necessary in radix tree,
   but results in cleaner routing tables.

   Remove it only when all the things will work!
 */

int ip6_dst_hoplimit(struct dst_entry *dst)
	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
	if (hoplimit == 0) {
		struct net_device *dev = dst->dev;
		struct inet6_dev *idev;

		idev = __in6_dev_get(dev);
			hoplimit = idev->cnf.hop_limit;
			hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;

EXPORT_SYMBOL(ip6_dst_hoplimit);
int ip6_route_add(struct fib6_config *cfg)
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;

	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len)
	if (cfg->fc_ifindex) {
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		idev = in6_dev_get(dev);

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	table = fib6_new_table(net, cfg->fc_table);
	if (table == NULL) {

	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);

	rt->dst.obsolete = -1;
	rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
				jiffies + clock_t_to_jiffies(cfg->fc_expires) :

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;

	rt->rt6i_metric = cfg->fc_metric;
	/* We cannot add true routes via loopback here,
	   they would result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) && !(addr_type & IPV6_ADDR_LOOPBACK)
	     && !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			dev = net->loopback_dev;
			idev = in6_dev_get(dev);

		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		rt->dst.error = -ENETUNREACH;
		rt->rt6i_flags = RTF_REJECT | RTF_NONEXTHOP;

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;

		gw_addr = &cfg->fc_gateway;
		ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
		gwa_type = ipv6_addr_type(gw_addr);

		if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt;

			/* IPv6 strictly inhibits using not link-local
			   addresses as nexthop address.
			   Otherwise, the router will not be able to send redirects.
			   It is very good, but in some (rare!) circumstances
			   (SIT, PtP, NBMA NOARP links) it is handy to allow
			   some exceptions. --ANK
			 */
			if (!(gwa_type & IPV6_ADDR_UNICAST))

			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);

				err = -EHOSTUNREACH;

			if (dev != grt->rt6i_dev) {
				dst_release(&grt->dst);

			dev = grt->rt6i_dev;
			idev = grt->rt6i_idev;
				in6_dev_hold(grt->rt6i_idev);

			if (!(grt->rt6i_flags & RTF_GATEWAY))

			dst_release(&grt->dst);

		if (dev == NULL || (dev->flags & IFF_LOOPBACK))
	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
		ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
		rt->rt6i_prefsrc.plen = 128;
		rt->rt6i_prefsrc.plen = 0;

	if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
		struct neighbour *neigh = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
		dst_set_neighbour(&rt->dst, neigh);

	rt->rt6i_flags = cfg->fc_flags;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

				if (type > RTAX_MAX) {

				dst_metric_set(&rt->dst, type, nla_get_u32(nla));

	rt->rt6i_idev = idev;
	rt->rt6i_table = table;
	cfg->fc_nlinfo.nl_net = dev_net(dev);
	return __ip6_ins_rt(rt, &cfg->fc_nlinfo);

static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
	struct fib6_table *table;
	struct net *net = dev_net(rt->rt6i_dev);

	if (rt == net->ipv6.ip6_null_entry)

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);

	err = fib6_del(rt, info);
	dst_release(&rt->dst);

	write_unlock_bh(&table->tb6_lock);

int ip6_del_rt(struct rt6_info *rt)
	struct nl_info info = {
		.nl_net = dev_net(rt->rt6i_dev),
	return __ip6_del_rt(rt, &info);
static int ip6_route_del(struct fib6_config *cfg)
	struct fib6_table *table;
	struct fib6_node *fn;
	struct rt6_info *rt;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);

	read_lock_bh(&table->tb6_lock);

	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len);

		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
			if (cfg->fc_ifindex &&
			    (rt->rt6i_dev == NULL ||
			     rt->rt6i_dev->ifindex != cfg->fc_ifindex))
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)

			read_unlock_bh(&table->tb6_lock);

			return __ip6_del_rt(rt, &cfg->fc_nlinfo);

	read_unlock_bh(&table->tb6_lock);

struct ip6rd_flowi {
	struct in6_addr gateway;
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt;
	struct fib6_node *fn;

	/*
	 * Get the "current" route for this destination and
	 * check if the redirect has come from an appropriate router.
	 *
	 * RFC 2461 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 */

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		/*
		 * Current route is on-link; redirect is always invalid.
		 *
		 * Seems, previous statement is not true. It could
		 * be node, which looks for us as on-link (f.e. proxy ndisc)
		 * But then router serving it might decide, that we should
		 * know truth 8)8) --ANK (980726).
		 */
		if (rt6_check_expired(rt))
		if (!(rt->rt6i_flags & RTF_GATEWAY))
		if (fl6->flowi6_oif != rt->rt6i_dev->ifindex)
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))

	rt = net->ipv6.ip6_null_entry;
	BACKTRACK(net, &fl6->saddr);

	read_unlock_bh(&table->tb6_lock);
static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
					   const struct in6_addr *src,
					   const struct in6_addr *gateway,
					   struct net_device *dev)
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct net *net = dev_net(dev);
	struct ip6rd_flowi rdfl = {
		.flowi6_oif = dev->ifindex,

	ipv6_addr_copy(&rdfl.gateway, gateway);

	if (rt6_need_strict(dest))
		flags |= RT6_LOOKUP_F_IFACE;

	return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
						   flags, __ip6_route_redirect);

void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
		  const struct in6_addr *saddr,
		  struct neighbour *neigh, u8 *lladdr, int on_link)
	struct rt6_info *rt, *nrt = NULL;
	struct netevent_redirect netevent;
	struct net *net = dev_net(neigh->dev);

	rt = ip6_route_redirect(dest, src, saddr, neigh->dev);

	if (rt == net->ipv6.ip6_null_entry) {
		if (net_ratelimit())
			printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
			       "for redirect target\n");

	/*
	 *	We have finally decided to accept it.
	 */
	neigh_update(neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER |
				     NEIGH_UPDATE_F_ISROUTER))

	/*
	 * Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm(&rt->dst);

	/* Duplicate redirect: silently ignore. */
	if (neigh == dst_get_neighbour_raw(&rt->dst))

	nrt = ip6_rt_copy(rt);

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
	nrt->rt6i_dst.plen = 128;
	nrt->dst.flags |= DST_HOST;

	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr *)neigh->primary_key);
	dst_set_neighbour(&nrt->dst, neigh_clone(neigh));

	if (ip6_ins_rt(nrt))

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

	if (rt->rt6i_flags & RTF_CACHE) {

	dst_release(&rt->dst);
/*
 *	Handle ICMP "packet too big" messages
 *	i.e. Path MTU discovery
 */

static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
			     struct net *net, u32 pmtu, int ifindex)
	struct rt6_info *rt, *nrt;

	rt = rt6_lookup(net, daddr, saddr, ifindex, 0);

	if (rt6_check_expired(rt)) {

	if (pmtu >= dst_mtu(&rt->dst))

	if (pmtu < IPV6_MIN_MTU) {
		/*
		 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
		 * MTU (1280) and a fragment header should always be included
		 * after a node receiving Too Big message reporting PMTU is
		 * less than the IPv6 Minimum Link MTU.
		 */
		pmtu = IPV6_MIN_MTU;

	/* New mtu received -> path was valid.
	   They are sent only in response to data packets,
	   so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm(&rt->dst);

	/* Host route. If it is static, it would be better
	   not to override it, but add new one, so that
	   when cache entry will expire old pmtu
	   would return automatically.
	 */
	if (rt->rt6i_flags & RTF_CACHE) {
		dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
			u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
			features |= RTAX_FEATURE_ALLFRAG;
			dst_metric_set(&rt->dst, RTAX_FEATURES, features);
		dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
		rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;

	/*
	   Two cases are possible:
	   1. It is connected route. Action: COW
	   2. It is gatewayed route or NONEXTHOP route. Action: clone it.
	 */
	if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
		nrt = rt6_alloc_cow(rt, daddr, saddr);
		nrt = rt6_alloc_clone(rt, daddr);

		dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
			u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
			features |= RTAX_FEATURE_ALLFRAG;
			dst_metric_set(&nrt->dst, RTAX_FEATURES, features);

		/* According to RFC 1981, detecting a PMTU increase shouldn't
		 * happen within 5 mins; the recommended timer is 10 mins.
		 * Here this route expiration time is set to ip6_rt_mtu_expires
		 * which is 10 mins. After 10 mins the decreased pmtu is expired
		 * and a PMTU increase will be detected automatically.
		 */
		dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
		nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;

	dst_release(&rt->dst);
void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
			struct net_device *dev, u32 pmtu)
	struct net *net = dev_net(dev);

	/*
	 * RFC 1981 states that a node "MUST reduce the size of the packets it
	 * is sending along the path" that caused the Packet Too Big message.
	 * Since it's not possible in the general case to determine which
	 * interface was used to send the original packet, we update the MTU
	 * on the interface that will be used to send future packets. We also
	 * update the MTU on the interface that received the Packet Too Big in
	 * case the original packet was forced out that interface with
	 * SO_BINDTODEVICE or similar. This is the next best thing to the
	 * correct behaviour, which would be to update the MTU on all
	 */
	rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
	rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);

/*
 *	Misc support functions
 */

static struct rt6_info *ip6_rt_copy(struct rt6_info *ort)
	struct net *net = dev_net(ort->rt6i_dev);
	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,

		rt->dst.input = ort->dst.input;
		rt->dst.output = ort->dst.output;

		dst_copy_metrics(&rt->dst, &ort->dst);
		rt->dst.error = ort->dst.error;
		rt->rt6i_idev = ort->rt6i_idev;
			in6_dev_hold(rt->rt6i_idev);
		rt->dst.lastuse = jiffies;
		rt->rt6i_expires = 0;

		ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
		memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
		rt->rt6i_table = ort->rt6i_table;
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex)
	struct fib6_node *fn;
	struct rt6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, RT6_TABLE_INFO);

	write_lock_bh(&table->tb6_lock);
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);

	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_dev->ifindex != ifindex)
		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))

	write_unlock_bh(&table->tb6_lock);

static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex,
	struct fib6_config cfg = {
		.fc_table = RT6_TABLE_INFO,
		.fc_metric = IP6_RT_PRIO_USER,
		.fc_ifindex = ifindex,
		.fc_dst_len = prefixlen,
		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
			    RTF_UP | RTF_PREF(pref),
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,

	ipv6_addr_copy(&cfg.fc_dst, prefix);
	ipv6_addr_copy(&cfg.fc_gateway, gwaddr);

	/* We should treat it as a default route if prefix length is 0. */
		cfg.fc_flags |= RTF_DEFAULT;

	ip6_route_add(&cfg);

	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);

struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
	struct rt6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);

	write_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (dev == rt->rt6i_dev &&
		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->rt6i_gateway, addr))

	write_unlock_bh(&table->tb6_lock);

struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
				     struct net_device *dev,
	struct fib6_config cfg = {
		.fc_table = RT6_TABLE_DFLT,
		.fc_metric = IP6_RT_PRIO_USER,
		.fc_ifindex = dev->ifindex,
		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
			    RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),

	ipv6_addr_copy(&cfg.fc_gateway, gwaddr);

	ip6_route_add(&cfg);

	return rt6_get_dflt_router(gwaddr, dev);

void rt6_purge_dflt_routers(struct net *net)
	struct rt6_info *rt;
	struct fib6_table *table;

	/* NOTE: Keep consistent with rt6_get_dflt_router */
	table = fib6_get_table(net, RT6_TABLE_DFLT);

	read_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
			read_unlock_bh(&table->tb6_lock);

	read_unlock_bh(&table->tb6_lock);
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = RT6_TABLE_MAIN;
	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
	cfg->fc_metric = rtmsg->rtmsg_metric;
	cfg->fc_expires = rtmsg->rtmsg_info;
	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
	cfg->fc_src_len = rtmsg->rtmsg_src_len;
	cfg->fc_flags = rtmsg->rtmsg_flags;

	cfg->fc_nlinfo.nl_net = net;

	ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
	ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
	ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);

int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;

	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!capable(CAP_NET_ADMIN))
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);
			err = ip6_route_add(&cfg);
			err = ip6_route_del(&cfg);

/*
 *	Drop the packet on the floor
 */

static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
	struct dst_entry *dst = skb_dst(skb);
	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				      IPSTATS_MIB_INADDRERRORS);
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);

static int ip6_pkt_discard(struct sk_buff *skb)
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);

static int ip6_pkt_discard_out(struct sk_buff *skb)
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static int ip6_pkt_prohibit(struct sk_buff *skb)
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);

static int ip6_pkt_prohibit_out(struct sk_buff *skb)
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
				    const struct in6_addr *addr,
	struct net *net = dev_net(idev->dev);
	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
					    net->loopback_dev, 0);
	struct neighbour *neigh;

		if (net_ratelimit())
			pr_warning("IPv6: Maximum number of routes reached,"
				   " consider increasing route/max_size.\n");
		return ERR_PTR(-ENOMEM);

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_idev = idev;
	rt->dst.obsolete = -1;

	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
		rt->rt6i_flags |= RTF_ANYCAST;
		rt->rt6i_flags |= RTF_LOCAL;
	neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
	if (IS_ERR(neigh)) {
		return ERR_CAST(neigh);
	dst_set_neighbour(&rt->dst, neigh);

	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
	rt->rt6i_dst.plen = 128;
	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);

	atomic_set(&rt->dst.__refcnt, 1);

int ip6_route_get_saddr(struct net *net,
			struct rt6_info *rt,
			const struct in6_addr *daddr,
			struct in6_addr *saddr)
	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);

	if (rt->rt6i_prefsrc.plen)
		ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
					 daddr, prefs, saddr);

/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;
	struct in6_addr *addr;

static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
	    rt != net->ipv6.ip6_null_entry &&
	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
		/* remove prefsrc entry */
		rt->rt6i_prefsrc.plen = 0;

void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,

	fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
struct arg_dev_net {
	struct net_device *dev;

static int fib6_ifdown(struct rt6_info *rt, void *arg)
	const struct arg_dev_net *adn = arg;
	const struct net_device *dev = adn->dev;

	if ((rt->rt6i_dev == dev || dev == NULL) &&
	    rt != adn->net->ipv6.ip6_null_entry) {
		RT6_TRACE("deleted by ifdown %p\n", rt);

void rt6_ifdown(struct net *net, struct net_device *dev)
	struct arg_dev_net adn = {

	fib6_clean_all(net, fib6_ifdown, 0, &adn);
	icmp6_clean_all(fib6_ifdown, &adn);
struct rt6_mtu_change_arg {
	struct net_device *dev;

static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	 */
	idev = __in6_dev_get(arg->dev);

	/* For administrative MTU increase, there is no way to discover
	   IPv6 PMTU increase, so PMTU increase should be updated here.
	   Since RFC 1981 doesn't include administrative MTU increase,
	   updating PMTU increase is a MUST. (i.e. jumbo frame)

	   If the new MTU is less than the route PMTU, this new MTU will be the
	   lowest MTU in the path; update the route PMTU to reflect the PMTU
	   decrease. If the new MTU is greater than the route PMTU, and the
	   old MTU is the lowest MTU in the path, update the route PMTU
	   to reflect the increase. In this case if the other nodes' MTU
	   also have the lowest MTU, a TOO BIG MESSAGE will lead to
	 */
	if (rt->rt6i_dev == arg->dev &&
	    !dst_metric_locked(&rt->dst, RTAX_MTU) &&
	    (dst_mtu(&rt->dst) >= arg->mtu ||
	     (dst_mtu(&rt->dst) < arg->mtu &&
	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);

void rt6_mtu_change(struct net_device *dev, unsigned mtu)
	struct rt6_mtu_change_arg arg = {

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]	= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_PRIORITY]	= { .type = NLA_U32 },
	[RTA_METRICS]	= { .type = NLA_NESTED },

static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg)
	struct nlattr *tb[RTA_MAX+1];

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);

	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;

	if (rtm->rtm_type == RTN_UNREACHABLE)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
		cfg->fc_flags |= RTF_GATEWAY;

		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);

		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);

	if (tb[RTA_PREFSRC])
		nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);

		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);

		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
	struct fib6_config cfg;

	err = rtm_to_fib6_config(skb, nlh, &cfg);

	return ip6_route_del(&cfg);

static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
	struct fib6_config cfg;

	err = rtm_to_fib6_config(skb, nlh, &cfg);

	return ip6_route_add(&cfg);

static inline size_t rt6_nlmsg_size(void)
	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo));
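/*
 * rt6_nlmsg_size() is the worst-case payload of one RTM_NEWROUTE message:
 * the fixed rtmsg header plus every attribute rt6_fill_node() may emit
 * (four 16-byte address attributes, four u32 attributes, up to RTAX_MAX
 * metrics and the cacheinfo block).  inet6_rt_notify() sizes its
 * notification skb with it, which is why an -EMSGSIZE there indicates a
 * bug in this function.
 */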
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 pid, u32 seq,
			 int prefix, int nowait, unsigned int flags)
	struct nlmsghdr *nlh;
	struct neighbour *n;

	if (prefix) {	/* user wants prefix routes only */
		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt->rt6i_dst.plen;
	rtm->rtm_src_len = rt->rt6i_src.plen;
		table = rt->rt6i_table->tb6_id;
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table;
	NLA_PUT_U32(skb, RTA_TABLE, table);
	if (rt->rt6i_flags & RTF_REJECT)
		rtm->rtm_type = RTN_UNREACHABLE;
	else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
		rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
	if (rt->rt6i_flags & RTF_DYNAMIC)
		rtm->rtm_protocol = RTPROT_REDIRECT;
	else if (rt->rt6i_flags & RTF_ADDRCONF)
		rtm->rtm_protocol = RTPROT_KERNEL;
	else if (rt->rt6i_flags & RTF_DEFAULT)
		rtm->rtm_protocol = RTPROT_RA;

	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

		NLA_PUT(skb, RTA_DST, 16, dst);
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
#ifdef CONFIG_IPV6_SUBTREES
		NLA_PUT(skb, RTA_SRC, 16, src);
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len)
		NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);

#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			int err = ip6mr_get_route(net, skb, rtm, nowait);
					goto nla_put_failure;
				if (err == -EMSGSIZE)
					goto nla_put_failure;

			NLA_PUT_U32(skb, RTA_IIF, iif);
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);

	if (rt->rt6i_prefsrc.plen) {
		struct in6_addr saddr_buf;
		ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
		NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);

	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto nla_put_failure;

	n = dst_get_neighbour(&rt->dst);
		if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
			goto nla_put_failure;

		NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);

	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);

	if (!(rt->rt6i_flags & RTF_EXPIRES))
	else if (rt->rt6i_expires - jiffies < INT_MAX)
		expires = rt->rt6i_expires - jiffies;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
			       expires, rt->dst.error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

	nlmsg_cancel(skb, nlh);
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;

	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
		prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;

	return rt6_fill_node(arg->net,
		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
		     NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
		     prefix, 0, NLM_F_MULTI);

static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	struct rt6_info *rt;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);

	memset(&fl6, 0, sizeof(fl6));

		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))

		ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));

		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))

		ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));

		iif = nla_get_u32(tb[RTA_IIF]);

		fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]);

		struct net_device *dev;
		dev = __dev_get_by_index(net, iif);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

	/* Reserve room for dummy headers, this skb can pass
	   through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));

	rt = (struct rt6_info *) ip6_route_output(net, NULL, &fl6);
	skb_dst_set(skb, &rt->dst);

	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
			    RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
			    nlh->nlmsg_seq, 0, 0, 0);

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
	if (skb == NULL)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
			    event, info->pid, seq, 0, 0, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
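
/* Device notifier: once the loopback device registers, attach it to
 * the namespace's null (and, with multiple tables, prohibit and
 * blackhole) route entries.
 */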
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct net *net = dev_net(dev);

	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	}

	return NOTIFY_OK;
}
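
/*
 *	/proc/net/ipv6_route and /proc/net/rt6_stats
 */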
#ifdef CONFIG_PROC_FS

static int rt6_info_route(struct rt6_info *rt, void *p_arg)
{
	struct seq_file *m = p_arg;
	struct neighbour *n;

	seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);

#ifdef CONFIG_IPV6_SUBTREES
	seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
#else
	seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
	rcu_read_lock();
	n = dst_get_neighbour(&rt->dst);
	if (n) {
		seq_printf(m, "%pi6", n->primary_key);
	} else {
		seq_puts(m, "00000000000000000000000000000000");
	}
	rcu_read_unlock();
	seq_printf(m, " %08x %08x %08x %08x %8s\n",
		   rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
		   rt->dst.__use, rt->rt6i_flags,
		   rt->rt6i_dev ? rt->rt6i_dev->name : "");
	return 0;
}

static int ipv6_route_show(struct seq_file *m, void *v)
{
	struct net *net = (struct net *)m->private;

	fib6_clean_all(net, rt6_info_route, 0, m);
	return 0;
}

static int ipv6_route_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, ipv6_route_show);
}

static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release_net,
};
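
/* Dump fib6 and dst-cache counters as hex fields for /proc/net/rt6_stats. */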
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   net->ipv6.rt6_stats->fib_rt_alloc,
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}

static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}

static const struct file_operations rt6_stats_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= rt6_stats_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release_net,
};
#endif	/* CONFIG_PROC_FS */
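
/* Routing cache tunables exposed through sysctl. */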
#ifdef CONFIG_SYSCTL

int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	proc_dointvec(ctl, write, buffer, lenp, ppos);
	fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
	return 0;
}

ctl_table ipv6_route_table_template[] = {
	{
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ }
};
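
/* Clone the sysctl template for a namespace and repoint each entry at
 * that namespace's own variables.
 */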
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
	}

	return table;
}
#endif
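
/* Per-namespace setup: dst_ops, the special null/prohibit/blackhole
 * entries, sysctl defaults and the /proc files.
 */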
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

#ifdef CONFIG_PROC_FS
	proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
	proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
#endif
	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
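
/* Per-namespace teardown, mirroring ip6_route_net_init(). */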
static void __net_exit ip6_route_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ipv6_route");
	proc_net_remove(net, "rt6_stats");
#endif
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
};
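
/* Subsystem init: dst cache, pernet ops, fib6/xfrm6/rules setup,
 * rtnetlink handlers and the device notifier.
 */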
int __init ip6_route_init(void)
{
	int ret;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_dst_entries;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	/* Registering of the loopback is done before this portion of code,
	 * so the loopback reference in rt6_info will not be taken there;
	 * do it manually for init_net.
	 */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = -ENOBUFS;
	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
		goto fib6_rules_init;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto fib6_rules_init;

out:
	return ret;

fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
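
/* Undo everything done in ip6_route_init(). */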
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}