// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              IPv4 Forwarding Information Base: semantics.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/netlink.h>
#include <linux/hash.h>
#include <linux/nospec.h>

#include <net/inet_dscp.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/netlink.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>
#include <net/addrconf.h>

#include "fib_lookup.h"
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_hash_bits;
static unsigned int fib_info_cnt;
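/* fib_info_hash buckets fib_info structs by their route-level attributes so
 * identical nexthop information can be shared between routes (see
 * fib_find_info() below); fib_info_laddrhash buckets them by preferred source
 * address so fib_sync_down_addr() can find every user of a local address that
 * went away.
 */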
/* for_nexthops and change_nexthops only used when nexthop object
 * is not set in a fib_info. The logic within can reference fib_nh.
 */
#ifdef CONFIG_IP_ROUTE_MULTIPATH

#define for_nexthops(fi) {                                              \
        int nhsel; const struct fib_nh *nh;                             \
        for (nhsel = 0, nh = (fi)->fib_nh;                              \
             nhsel < fib_info_num_path((fi));                           \
             nh++, nhsel++)

#define change_nexthops(fi) {                                           \
        int nhsel; struct fib_nh *nexthop_nh;                           \
        for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);   \
             nhsel < fib_info_num_path((fi));                           \
             nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope, that gcc will optimize it to get rid of dummy loop */

#define for_nexthops(fi) {                                              \
        int nhsel; const struct fib_nh *nh = (fi)->fib_nh;              \
        for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {                                           \
        int nhsel;                                                      \
        struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);    \
        for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
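/* Typical use of the iterators above (illustrative sketch, not a fragment of
 * this file): the macros open a block that declares nhsel and nh/nexthop_nh,
 * which is why every walk has to be closed with endfor_nexthops():
 *
 *      for_nexthops(fi) {
 *              if (nh->fib_nh_flags & RTNH_F_DEAD)
 *                      continue;
 *              ... use nh and nhsel ...
 *      } endfor_nexthops(fi);
 */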
const struct fib_prop fib_props[RTN_MAX + 1] = {
        [RTN_UNSPEC] = {
                .error  = 0,
                .scope  = RT_SCOPE_NOWHERE,
        },
        [RTN_UNICAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_LOCAL] = {
                .error  = 0,
                .scope  = RT_SCOPE_HOST,
        },
        [RTN_BROADCAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_LINK,
        },
        [RTN_ANYCAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_LINK,
        },
        [RTN_MULTICAST] = {
                .error  = 0,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_BLACKHOLE] = {
                .error  = -EINVAL,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_UNREACHABLE] = {
                .error  = -EHOSTUNREACH,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_PROHIBIT] = {
                .error  = -EACCES,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_THROW] = {
                .error  = -EAGAIN,
                .scope  = RT_SCOPE_UNIVERSE,
        },
        [RTN_NAT] = {
                .error  = -EINVAL,
                .scope  = RT_SCOPE_NOWHERE,
        },
        [RTN_XRESOLVE] = {
                .error  = -EINVAL,
                .scope  = RT_SCOPE_NOWHERE,
        },
};
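/* fib_props is indexed by route type: fib_create_info() rejects a config whose
 * rtm_scope is narrower than fib_props[fc_type].scope, and types with a
 * non-zero .error (unreachable, prohibit, blackhole, ...) are terminal --
 * lookups return that error instead of forwarding.
 */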
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
        struct rtable *rt = rcu_dereference_protected(*rtp, 1);

        if (!rt)
                return;

        /* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
         * because we waited an RCU grace period before calling
         * free_fib_info_rcu()
         */

        dst_dev_put(&rt->dst);
        dst_release_immediate(&rt->dst);
}
static void free_nh_exceptions(struct fib_nh_common *nhc)
{
        struct fnhe_hash_bucket *hash;
        int i;

        hash = rcu_dereference_protected(nhc->nhc_exceptions, 1);
        if (!hash)
                return;
        for (i = 0; i < FNHE_HASH_SIZE; i++) {
                struct fib_nh_exception *fnhe;

                fnhe = rcu_dereference_protected(hash[i].chain, 1);
                while (fnhe) {
                        struct fib_nh_exception *next;

                        next = rcu_dereference_protected(fnhe->fnhe_next, 1);

                        rt_fibinfo_free(&fnhe->fnhe_rth_input);
                        rt_fibinfo_free(&fnhe->fnhe_rth_output);

                        kfree(fnhe);

                        fnhe = next;
                }
        }
        kfree(hash);
}
static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
        int cpu;

        if (!rtp)
                return;

        for_each_possible_cpu(cpu) {
                struct rtable *rt;

                rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
                if (rt) {
                        dst_dev_put(&rt->dst);
                        dst_release_immediate(&rt->dst);
                }
        }
        free_percpu(rtp);
}
void fib_nh_common_release(struct fib_nh_common *nhc)
{
        netdev_put(nhc->nhc_dev, &nhc->nhc_dev_tracker);
        lwtstate_put(nhc->nhc_lwtstate);
        rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output);
        rt_fibinfo_free(&nhc->nhc_rth_input);
        free_nh_exceptions(nhc);
}
EXPORT_SYMBOL_GPL(fib_nh_common_release);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
        if (fib_nh->nh_tclassid)
                atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
        fib_nh_common_release(&fib_nh->nh_common);
}
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
        struct fib_info *fi = container_of(head, struct fib_info, rcu);

        if (fi->nh) {
                nexthop_put(fi->nh);
        } else {
                change_nexthops(fi) {
                        fib_nh_release(fi->fib_net, nexthop_nh);
                } endfor_nexthops(fi);
        }

        ip_fib_metrics_put(fi->fib_metrics);

        kfree(fi);
}
void free_fib_info(struct fib_info *fi)
{
        if (fi->fib_dead == 0) {
                pr_warn("Freeing alive fib_info %p\n", fi);
                return;
        }

        call_rcu_hurry(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
{
        if (fi && refcount_dec_and_test(&fi->fib_treeref)) {
                hlist_del(&fi->fib_hash);

                fib_info_cnt--;

                if (fi->fib_prefsrc)
                        hlist_del(&fi->fib_lhash);
                if (fi->nh) {
                        list_del(&fi->nh_list);
                } else {
                        change_nexthops(fi) {
                                if (!nexthop_nh->fib_nh_dev)
                                        continue;
                                hlist_del_rcu(&nexthop_nh->nh_hash);
                        } endfor_nexthops(fi)
                }
                /* Paired with READ_ONCE() from fib_table_lookup() */
                WRITE_ONCE(fi->fib_dead, 1);
                fib_info_put(fi);
        }
}
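/* A fib_info carries two reference counts: fib_treeref counts the fib_alias
 * entries in the tries that point at it, fib_clntref counts lookup results and
 * other short-term users. fib_release_info() drops a tree reference and, on
 * the last one, unhashes the fib_info and marks it dead; the memory itself is
 * only freed via free_fib_info() once the client references are gone as well.
 */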
static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi)
{
        const struct fib_nh *onh;

        if (fi->nh || ofi->nh)
                return nexthop_cmp(fi->nh, ofi->nh) ? 0 : -1;

        if (ofi->fib_nhs == 0)
                return 0;

        for_nexthops(fi) {
                onh = fib_info_nh(ofi, nhsel);

                if (nh->fib_nh_oif != onh->fib_nh_oif ||
                    nh->fib_nh_gw_family != onh->fib_nh_gw_family ||
                    nh->fib_nh_scope != onh->fib_nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                    nh->fib_nh_weight != onh->fib_nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
                    nh->nh_tclassid != onh->nh_tclassid ||
#endif
                    lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) ||
                    ((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK))
                        return -1;

                if (nh->fib_nh_gw_family == AF_INET &&
                    nh->fib_nh_gw4 != onh->fib_nh_gw4)
                        return -1;

                if (nh->fib_nh_gw_family == AF_INET6 &&
                    ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6))
                        return -1;
        } endfor_nexthops(fi);
        return 0;
}
static struct hlist_head *fib_nh_head(struct net_device *dev)
{
        return &dev->fib_nh_head;
}
static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope,
                                      u32 prefsrc, u32 priority)
{
        unsigned int val = init_val;

        val ^= (protocol << 8) | scope;
        val ^= prefsrc;
        val ^= priority;

        return val;
}

static unsigned int fib_info_hashfn_result(const struct net *net,
                                           unsigned int val)
{
        return hash_32(val ^ net_hash_mix(net), fib_info_hash_bits);
}

static inline unsigned int fib_info_hashfn(struct fib_info *fi)
{
        unsigned int val;

        val = fib_info_hashfn_1(fi->fib_nhs, fi->fib_protocol,
                                fi->fib_scope, (__force u32)fi->fib_prefsrc,
                                fi->fib_priority);

        if (fi->nh) {
                val ^= fi->nh->id;
        } else {
                for_nexthops(fi) {
                        val ^= nh->fib_nh_oif;
                } endfor_nexthops(fi)
        }

        return fib_info_hashfn_result(fi->fib_net, val);
}
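/* The hash is computed in two steps on purpose: fib_info_hashfn_1() folds only
 * fields that are also available in a struct fib_config, so fib_find_info_nh()
 * below can locate the candidate bucket straight from a netlink request
 * (using the nexthop id as init_val) without first building a throw-away
 * fib_info.
 */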
/* no metrics, only nexthop id */
static struct fib_info *fib_find_info_nh(struct net *net,
                                         const struct fib_config *cfg)
{
        struct hlist_head *head;
        struct fib_info *fi;
        unsigned int hash;

        hash = fib_info_hashfn_1(cfg->fc_nh_id,
                                 cfg->fc_protocol, cfg->fc_scope,
                                 (__force u32)cfg->fc_prefsrc,
                                 cfg->fc_priority);
        hash = fib_info_hashfn_result(net, hash);
        head = &fib_info_hash[hash];

        hlist_for_each_entry(fi, head, fib_hash) {
                if (!net_eq(fi->fib_net, net))
                        continue;
                if (!fi->nh || fi->nh->id != cfg->fc_nh_id)
                        continue;
                if (cfg->fc_protocol == fi->fib_protocol &&
                    cfg->fc_scope == fi->fib_scope &&
                    cfg->fc_prefsrc == fi->fib_prefsrc &&
                    cfg->fc_priority == fi->fib_priority &&
                    cfg->fc_type == fi->fib_type &&
                    cfg->fc_table == fi->fib_tb_id &&
                    !((cfg->fc_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK))
                        return fi;
        }

        return NULL;
}
static struct fib_info *fib_find_info(struct fib_info *nfi)
{
        struct hlist_head *head;
        struct fib_info *fi;
        unsigned int hash;

        hash = fib_info_hashfn(nfi);
        head = &fib_info_hash[hash];

        hlist_for_each_entry(fi, head, fib_hash) {
                if (!net_eq(fi->fib_net, nfi->fib_net))
                        continue;
                if (fi->fib_nhs != nfi->fib_nhs)
                        continue;
                if (nfi->fib_protocol == fi->fib_protocol &&
                    nfi->fib_scope == fi->fib_scope &&
                    nfi->fib_prefsrc == fi->fib_prefsrc &&
                    nfi->fib_priority == fi->fib_priority &&
                    nfi->fib_type == fi->fib_type &&
                    nfi->fib_tb_id == fi->fib_tb_id &&
                    memcmp(nfi->fib_metrics, fi->fib_metrics,
                           sizeof(u32) * RTAX_MAX) == 0 &&
                    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
                    nh_comp(fi, nfi) == 0)
                        return fi;
        }

        return NULL;
}
/* Check, that the gateway is already configured.
 * Used only by redirect accept routine, under rcu_read_lock();
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
        struct hlist_head *head;
        struct fib_nh *nh;

        head = fib_nh_head(dev);

        hlist_for_each_entry_rcu(nh, head, nh_hash) {
                DEBUG_NET_WARN_ON_ONCE(nh->fib_nh_dev != dev);
                if (nh->fib_nh_gw4 == gw &&
                    !(nh->fib_nh_flags & RTNH_F_DEAD)) {
                        return 0;
                }
        }

        return -1;
}
size_t fib_nlmsg_size(struct fib_info *fi)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
                         + nla_total_size(4) /* RTA_TABLE */
                         + nla_total_size(4) /* RTA_DST */
                         + nla_total_size(4) /* RTA_PRIORITY */
                         + nla_total_size(4) /* RTA_PREFSRC */
                         + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
        unsigned int nhs = fib_info_num_path(fi);

        /* space for nested metrics */
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

        if (fi->nh)
                payload += nla_total_size(4); /* RTA_NH_ID */

        if (fi->fib_nhs) {
                size_t nh_encapsize = 0;
                /* Also handles the special case nhs == 1 */

                /* each nexthop is packed in an attribute */
                size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
                unsigned int i;

                /* may contain flow and gateway attribute */
                nhsize += 2 * nla_total_size(4);

                /* grab encap info */
                for (i = 0; i < fib_info_num_path(fi); i++) {
                        struct fib_nh_common *nhc = fib_info_nhc(fi, i);

                        if (nhc->nhc_lwtstate) {
                                /* RTA_ENCAP_TYPE */
                                nh_encapsize += lwtunnel_get_encap_size(
                                                nhc->nhc_lwtstate);
                                /* RTA_ENCAP */
                                nh_encapsize += nla_total_size(2);
                        }
                }

                /* all nexthops are packed in a nested attribute */
                payload += nla_total_size((nhs * nhsize) + nh_encapsize);
        }

        return payload;
}
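/* Rough sketch of the accounting for a plain single-nexthop IPv4 route without
 * lwtunnel encap (illustrative, not exact byte counts): rtmsg header + four
 * u32 attributes (table, dst, priority, prefsrc) + an RTAX_CC_ALGO string +
 * one nested metrics block of RTAX_MAX u32s + one rtnexthop slot with room
 * for RTA_FLOW and RTA_GATEWAY. Overestimating is fine; undersizing would make
 * fib_dump_info() fail with -EMSGSIZE (see the WARN_ON in rtmsg_fib()).
 */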
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
               int dst_len, u32 tb_id, const struct nl_info *info,
               unsigned int nlm_flags)
{
        struct fib_rt_info fri;
        struct sk_buff *skb;
        u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
        int err = -ENOBUFS;

        skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
        if (!skb)
                goto errout;

        fri.fi = fa->fa_info;
        fri.tb_id = tb_id;
        fri.dst = key;
        fri.dst_len = dst_len;
        fri.dscp = fa->fa_dscp;
        fri.type = fa->fa_type;
        fri.offload = READ_ONCE(fa->offload);
        fri.trap = READ_ONCE(fa->trap);
        fri.offload_failed = READ_ONCE(fa->offload_failed);
        err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
        rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
                    info->nlh, GFP_KERNEL);
        return;
errout:
        rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
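/* rtmsg_fib() is the notification side of route changes: the fib_trie code
 * calls it on insert, replace and delete so that userspace listeners on the
 * RTNLGRP_IPV4_ROUTE group (e.g. "ip monitor route") see the update.
 */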
static int fib_detect_death(struct fib_info *fi, int order,
                            struct fib_info **last_resort, int *last_idx,
                            int dflt)
{
        const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
        struct neighbour *n;
        int state = NUD_NONE;

        if (likely(nhc->nhc_gw_family == AF_INET))
                n = neigh_lookup(&arp_tbl, &nhc->nhc_gw.ipv4, nhc->nhc_dev);
        else if (nhc->nhc_gw_family == AF_INET6)
                n = neigh_lookup(ipv6_stub->nd_tbl, &nhc->nhc_gw.ipv6,
                                 nhc->nhc_dev);
        else
                n = NULL;

        if (n) {
                state = READ_ONCE(n->nud_state);
                neigh_release(n);
        } else {
                return 0;
        }

        if (state == NUD_REACHABLE)
                return 0;
        if ((state & NUD_VALID) && order != dflt)
                return 0;
        if ((state & NUD_VALID) ||
            (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
                *last_resort = fi;
                *last_idx = order;
        }
        return 1;
}
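/* Used by fib_select_default() further below: when several default routes
 * exist, the neighbour state of each gateway is probed here so that a gateway
 * whose ARP/ND entry looks dead can be skipped in favour of a "last resort"
 * candidate.
 */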
562 int fib_nh_common_init(struct net
*net
, struct fib_nh_common
*nhc
,
563 struct nlattr
*encap
, u16 encap_type
,
564 void *cfg
, gfp_t gfp_flags
,
565 struct netlink_ext_ack
*extack
)
569 nhc
->nhc_pcpu_rth_output
= alloc_percpu_gfp(struct rtable __rcu
*,
571 if (!nhc
->nhc_pcpu_rth_output
)
575 struct lwtunnel_state
*lwtstate
;
577 if (encap_type
== LWTUNNEL_ENCAP_NONE
) {
578 NL_SET_ERR_MSG(extack
, "LWT encap type not specified");
582 err
= lwtunnel_build_state(net
, encap_type
, encap
,
583 nhc
->nhc_family
, cfg
, &lwtstate
,
588 nhc
->nhc_lwtstate
= lwtstate_get(lwtstate
);
594 rt_fibinfo_free_cpus(nhc
->nhc_pcpu_rth_output
);
595 nhc
->nhc_pcpu_rth_output
= NULL
;
598 EXPORT_SYMBOL_GPL(fib_nh_common_init
);
600 int fib_nh_init(struct net
*net
, struct fib_nh
*nh
,
601 struct fib_config
*cfg
, int nh_weight
,
602 struct netlink_ext_ack
*extack
)
606 nh
->fib_nh_family
= AF_INET
;
608 err
= fib_nh_common_init(net
, &nh
->nh_common
, cfg
->fc_encap
,
609 cfg
->fc_encap_type
, cfg
, GFP_KERNEL
, extack
);
613 nh
->fib_nh_oif
= cfg
->fc_oif
;
614 nh
->fib_nh_gw_family
= cfg
->fc_gw_family
;
615 if (cfg
->fc_gw_family
== AF_INET
)
616 nh
->fib_nh_gw4
= cfg
->fc_gw4
;
617 else if (cfg
->fc_gw_family
== AF_INET6
)
618 nh
->fib_nh_gw6
= cfg
->fc_gw6
;
620 nh
->fib_nh_flags
= cfg
->fc_flags
;
622 #ifdef CONFIG_IP_ROUTE_CLASSID
623 nh
->nh_tclassid
= cfg
->fc_flow
;
625 atomic_inc(&net
->ipv4
.fib_num_tclassid_users
);
627 #ifdef CONFIG_IP_ROUTE_MULTIPATH
628 nh
->fib_nh_weight
= nh_weight
;
633 #ifdef CONFIG_IP_ROUTE_MULTIPATH
635 static int fib_count_nexthops(struct rtnexthop
*rtnh
, int remaining
,
636 struct netlink_ext_ack
*extack
)
640 while (rtnh_ok(rtnh
, remaining
)) {
642 rtnh
= rtnh_next(rtnh
, &remaining
);
645 /* leftover implies invalid nexthop configuration, discard it */
647 NL_SET_ERR_MSG(extack
,
648 "Invalid nexthop configuration - extra data after nexthops");
655 static int fib_gw_from_attr(__be32
*gw
, struct nlattr
*nla
,
656 struct netlink_ext_ack
*extack
)
658 if (nla_len(nla
) < sizeof(*gw
)) {
659 NL_SET_ERR_MSG(extack
, "Invalid IPv4 address in RTA_GATEWAY");
663 *gw
= nla_get_in_addr(nla
);
668 /* only called when fib_nh is integrated into fib_info */
669 static int fib_get_nhs(struct fib_info
*fi
, struct rtnexthop
*rtnh
,
670 int remaining
, struct fib_config
*cfg
,
671 struct netlink_ext_ack
*extack
)
673 struct net
*net
= fi
->fib_net
;
674 struct fib_config fib_cfg
;
678 change_nexthops(fi
) {
681 memset(&fib_cfg
, 0, sizeof(fib_cfg
));
683 if (!rtnh_ok(rtnh
, remaining
)) {
684 NL_SET_ERR_MSG(extack
,
685 "Invalid nexthop configuration - extra data after nexthop");
689 if (rtnh
->rtnh_flags
& (RTNH_F_DEAD
| RTNH_F_LINKDOWN
)) {
690 NL_SET_ERR_MSG(extack
,
691 "Invalid flags for nexthop - can not contain DEAD or LINKDOWN");
695 fib_cfg
.fc_flags
= (cfg
->fc_flags
& ~0xFF) | rtnh
->rtnh_flags
;
696 fib_cfg
.fc_oif
= rtnh
->rtnh_ifindex
;
698 attrlen
= rtnh_attrlen(rtnh
);
700 struct nlattr
*nla
, *nlav
, *attrs
= rtnh_attrs(rtnh
);
702 nla
= nla_find(attrs
, attrlen
, RTA_GATEWAY
);
703 nlav
= nla_find(attrs
, attrlen
, RTA_VIA
);
705 NL_SET_ERR_MSG(extack
,
706 "Nexthop configuration can not contain both GATEWAY and VIA");
710 ret
= fib_gw_from_attr(&fib_cfg
.fc_gw4
, nla
,
716 fib_cfg
.fc_gw_family
= AF_INET
;
718 ret
= fib_gw_from_via(&fib_cfg
, nlav
, extack
);
723 nla
= nla_find(attrs
, attrlen
, RTA_FLOW
);
725 if (nla_len(nla
) < sizeof(u32
)) {
726 NL_SET_ERR_MSG(extack
, "Invalid RTA_FLOW");
729 fib_cfg
.fc_flow
= nla_get_u32(nla
);
732 fib_cfg
.fc_encap
= nla_find(attrs
, attrlen
, RTA_ENCAP
);
733 /* RTA_ENCAP_TYPE length checked in
734 * lwtunnel_valid_encap_type_attr
736 nla
= nla_find(attrs
, attrlen
, RTA_ENCAP_TYPE
);
738 fib_cfg
.fc_encap_type
= nla_get_u16(nla
);
741 ret
= fib_nh_init(net
, nexthop_nh
, &fib_cfg
,
742 rtnh
->rtnh_hops
+ 1, extack
);
746 rtnh
= rtnh_next(rtnh
, &remaining
);
747 } endfor_nexthops(fi
);
750 nh
= fib_info_nh(fi
, 0);
751 if (cfg
->fc_oif
&& nh
->fib_nh_oif
!= cfg
->fc_oif
) {
752 NL_SET_ERR_MSG(extack
,
753 "Nexthop device index does not match RTA_OIF");
756 if (cfg
->fc_gw_family
) {
757 if (cfg
->fc_gw_family
!= nh
->fib_nh_gw_family
||
758 (cfg
->fc_gw_family
== AF_INET
&&
759 nh
->fib_nh_gw4
!= cfg
->fc_gw4
) ||
760 (cfg
->fc_gw_family
== AF_INET6
&&
761 ipv6_addr_cmp(&nh
->fib_nh_gw6
, &cfg
->fc_gw6
))) {
762 NL_SET_ERR_MSG(extack
,
763 "Nexthop gateway does not match RTA_GATEWAY or RTA_VIA");
767 #ifdef CONFIG_IP_ROUTE_CLASSID
768 if (cfg
->fc_flow
&& nh
->nh_tclassid
!= cfg
->fc_flow
) {
769 NL_SET_ERR_MSG(extack
,
770 "Nexthop class id does not match RTA_FLOW");
779 /* only called when fib_nh is integrated into fib_info */
780 static void fib_rebalance(struct fib_info
*fi
)
785 if (fib_info_num_path(fi
) < 2)
790 if (nh
->fib_nh_flags
& RTNH_F_DEAD
)
793 if (ip_ignore_linkdown(nh
->fib_nh_dev
) &&
794 nh
->fib_nh_flags
& RTNH_F_LINKDOWN
)
797 total
+= nh
->fib_nh_weight
;
798 } endfor_nexthops(fi
);
801 change_nexthops(fi
) {
804 if (nexthop_nh
->fib_nh_flags
& RTNH_F_DEAD
) {
806 } else if (ip_ignore_linkdown(nexthop_nh
->fib_nh_dev
) &&
807 nexthop_nh
->fib_nh_flags
& RTNH_F_LINKDOWN
) {
810 w
+= nexthop_nh
->fib_nh_weight
;
811 upper_bound
= DIV_ROUND_CLOSEST_ULL((u64
)w
<< 31,
815 atomic_set(&nexthop_nh
->fib_nh_upper_bound
, upper_bound
);
816 } endfor_nexthops(fi
);
818 #else /* CONFIG_IP_ROUTE_MULTIPATH */
820 static int fib_get_nhs(struct fib_info
*fi
, struct rtnexthop
*rtnh
,
821 int remaining
, struct fib_config
*cfg
,
822 struct netlink_ext_ack
*extack
)
824 NL_SET_ERR_MSG(extack
, "Multipath support not enabled in kernel");
829 #define fib_rebalance(fi) do { } while (0)
831 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
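/* Worked example of the hash-threshold math in fib_rebalance() above, assuming
 * two nexthops with weights 1 and 3: total = 4, so the running upper bounds
 * become roughly 2^31/4 for the first hop and 2^31 for the second.
 * fib_select_multipath() later picks the first nexthop whose
 * fib_nh_upper_bound is >= the flow hash (a value in [0, 2^31)), giving the
 * two hops a 25%/75% share of flows.
 */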
833 static int fib_encap_match(struct net
*net
, u16 encap_type
,
834 struct nlattr
*encap
,
835 const struct fib_nh
*nh
,
836 const struct fib_config
*cfg
,
837 struct netlink_ext_ack
*extack
)
839 struct lwtunnel_state
*lwtstate
;
842 if (encap_type
== LWTUNNEL_ENCAP_NONE
)
845 ret
= lwtunnel_build_state(net
, encap_type
, encap
, AF_INET
,
846 cfg
, &lwtstate
, extack
);
848 result
= lwtunnel_cmp_encap(lwtstate
, nh
->fib_nh_lws
);
849 lwtstate_free(lwtstate
);
855 int fib_nh_match(struct net
*net
, struct fib_config
*cfg
, struct fib_info
*fi
,
856 struct netlink_ext_ack
*extack
)
858 #ifdef CONFIG_IP_ROUTE_MULTIPATH
859 struct rtnexthop
*rtnh
;
863 if (cfg
->fc_priority
&& cfg
->fc_priority
!= fi
->fib_priority
)
867 if (fi
->nh
&& cfg
->fc_nh_id
== fi
->nh
->id
)
873 if (cfg
->fc_oif
|| cfg
->fc_gw_family
|| cfg
->fc_mp
)
878 if (cfg
->fc_oif
|| cfg
->fc_gw_family
) {
881 nh
= fib_info_nh(fi
, 0);
883 if (fib_encap_match(net
, cfg
->fc_encap_type
,
884 cfg
->fc_encap
, nh
, cfg
, extack
))
887 #ifdef CONFIG_IP_ROUTE_CLASSID
889 cfg
->fc_flow
!= nh
->nh_tclassid
)
892 if ((cfg
->fc_oif
&& cfg
->fc_oif
!= nh
->fib_nh_oif
) ||
893 (cfg
->fc_gw_family
&&
894 cfg
->fc_gw_family
!= nh
->fib_nh_gw_family
))
897 if (cfg
->fc_gw_family
== AF_INET
&&
898 cfg
->fc_gw4
!= nh
->fib_nh_gw4
)
901 if (cfg
->fc_gw_family
== AF_INET6
&&
902 ipv6_addr_cmp(&cfg
->fc_gw6
, &nh
->fib_nh_gw6
))
908 #ifdef CONFIG_IP_ROUTE_MULTIPATH
913 remaining
= cfg
->fc_mp_len
;
918 if (!rtnh_ok(rtnh
, remaining
))
921 if (rtnh
->rtnh_ifindex
&& rtnh
->rtnh_ifindex
!= nh
->fib_nh_oif
)
924 attrlen
= rtnh_attrlen(rtnh
);
926 struct nlattr
*nla
, *nlav
, *attrs
= rtnh_attrs(rtnh
);
929 nla
= nla_find(attrs
, attrlen
, RTA_GATEWAY
);
930 nlav
= nla_find(attrs
, attrlen
, RTA_VIA
);
932 NL_SET_ERR_MSG(extack
,
933 "Nexthop configuration can not contain both GATEWAY and VIA");
940 err
= fib_gw_from_attr(&gw
, nla
, extack
);
944 if (nh
->fib_nh_gw_family
!= AF_INET
||
945 gw
!= nh
->fib_nh_gw4
)
948 struct fib_config cfg2
;
950 err
= fib_gw_from_via(&cfg2
, nlav
, extack
);
954 switch (nh
->fib_nh_gw_family
) {
956 if (cfg2
.fc_gw_family
!= AF_INET
||
957 cfg2
.fc_gw4
!= nh
->fib_nh_gw4
)
961 if (cfg2
.fc_gw_family
!= AF_INET6
||
962 ipv6_addr_cmp(&cfg2
.fc_gw6
,
969 #ifdef CONFIG_IP_ROUTE_CLASSID
970 nla
= nla_find(attrs
, attrlen
, RTA_FLOW
);
972 if (nla_len(nla
) < sizeof(u32
)) {
973 NL_SET_ERR_MSG(extack
, "Invalid RTA_FLOW");
976 if (nla_get_u32(nla
) != nh
->nh_tclassid
)
982 rtnh
= rtnh_next(rtnh
, &remaining
);
983 } endfor_nexthops(fi
);
988 bool fib_metrics_match(struct fib_config
*cfg
, struct fib_info
*fi
)
996 nla_for_each_attr(nla
, cfg
->fc_mx
, cfg
->fc_mx_len
, remaining
) {
997 int type
= nla_type(nla
);
1002 if (type
> RTAX_MAX
)
1005 type
= array_index_nospec(type
, RTAX_MAX
+ 1);
1006 if (type
== RTAX_CC_ALGO
) {
1007 char tmp
[TCP_CA_NAME_MAX
];
1008 bool ecn_ca
= false;
1010 nla_strscpy(tmp
, nla
, sizeof(tmp
));
1011 val
= tcp_ca_get_key_by_name(tmp
, &ecn_ca
);
1013 if (nla_len(nla
) != sizeof(u32
))
1015 val
= nla_get_u32(nla
);
1018 fi_val
= fi
->fib_metrics
->metrics
[type
- 1];
1019 if (type
== RTAX_FEATURES
)
1020 fi_val
&= ~DST_FEATURE_ECN_CA
;
1029 static int fib_check_nh_v6_gw(struct net
*net
, struct fib_nh
*nh
,
1030 u32 table
, struct netlink_ext_ack
*extack
)
1032 struct fib6_config cfg
= {
1034 .fc_flags
= nh
->fib_nh_flags
| RTF_GATEWAY
,
1035 .fc_ifindex
= nh
->fib_nh_oif
,
1036 .fc_gateway
= nh
->fib_nh_gw6
,
1038 struct fib6_nh fib6_nh
= {};
1041 err
= ipv6_stub
->fib6_nh_init(net
, &fib6_nh
, &cfg
, GFP_KERNEL
, extack
);
1043 nh
->fib_nh_dev
= fib6_nh
.fib_nh_dev
;
1044 netdev_hold(nh
->fib_nh_dev
, &nh
->fib_nh_dev_tracker
,
1046 nh
->fib_nh_oif
= nh
->fib_nh_dev
->ifindex
;
1047 nh
->fib_nh_scope
= RT_SCOPE_LINK
;
1049 ipv6_stub
->fib6_nh_release(&fib6_nh
);
/*
 * Semantics of nexthop is very messy for historical reasons.
 * We have to take into account that:
 * a) gateway can be actually local interface address,
 *    so that gatewayed route is direct.
 * b) gateway must be on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) If both gateway and interface are specified, they should not
 *    contradict.
 * d) If we use tunnel routes, gateway could be not on-link.
 *
 * Attempt to reconcile all of these (alas, self-contradictory) conditions
 * results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size
 * of code does not increase practically, but it becomes
 * much more general.
 * Every prefix is assigned a "scope" value: "host" is local address,
 * "link" is direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
 * which means that gw is forced to be on link.
 *
 * Code is still hairy, but now it is apparently logically
 * consistent and very flexible. F.e. as a by-product it allows
 * independent exterior and interior routing processes to coexist in peace.
 *
 * Normally it looks like this:
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *                |
 *                |-> {link prefix} -> (gw, oif) [scope local]
 *                                      |
 *                                      |-> {local prefix} (terminal node)
 */
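/* Concrete illustration of the recursion (assumed commands, not from this
 * file): "ip route add 10.0.0.0/8 via 192.0.2.1" installs a universe-scope
 * prefix whose gateway must itself be resolvable by a narrower-scope route,
 * e.g. the link-scope "192.0.2.0/24 dev eth0" created when the address was
 * configured; adding "onlink" instead skips that check and forces the gateway
 * to be treated as directly reachable on the device.
 */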
1098 static int fib_check_nh_v4_gw(struct net
*net
, struct fib_nh
*nh
, u32 table
,
1099 u8 scope
, struct netlink_ext_ack
*extack
)
1101 struct net_device
*dev
;
1102 struct fib_result res
;
1105 if (nh
->fib_nh_flags
& RTNH_F_ONLINK
) {
1106 unsigned int addr_type
;
1108 if (scope
>= RT_SCOPE_LINK
) {
1109 NL_SET_ERR_MSG(extack
, "Nexthop has invalid scope");
1112 dev
= __dev_get_by_index(net
, nh
->fib_nh_oif
);
1114 NL_SET_ERR_MSG(extack
, "Nexthop device required for onlink");
1117 if (!(dev
->flags
& IFF_UP
)) {
1118 NL_SET_ERR_MSG(extack
, "Nexthop device is not up");
1121 addr_type
= inet_addr_type_dev_table(net
, dev
, nh
->fib_nh_gw4
);
1122 if (addr_type
!= RTN_UNICAST
) {
1123 NL_SET_ERR_MSG(extack
, "Nexthop has invalid gateway");
1126 if (!netif_carrier_ok(dev
))
1127 nh
->fib_nh_flags
|= RTNH_F_LINKDOWN
;
1128 nh
->fib_nh_dev
= dev
;
1129 netdev_hold(dev
, &nh
->fib_nh_dev_tracker
, GFP_ATOMIC
);
1130 nh
->fib_nh_scope
= RT_SCOPE_LINK
;
1135 struct fib_table
*tbl
= NULL
;
1136 struct flowi4 fl4
= {
1137 .daddr
= nh
->fib_nh_gw4
,
1138 .flowi4_scope
= scope
+ 1,
1139 .flowi4_oif
= nh
->fib_nh_oif
,
1140 .flowi4_iif
= LOOPBACK_IFINDEX
,
1143 /* It is not necessary, but requires a bit of thinking */
1144 if (fl4
.flowi4_scope
< RT_SCOPE_LINK
)
1145 fl4
.flowi4_scope
= RT_SCOPE_LINK
;
1147 if (table
&& table
!= RT_TABLE_MAIN
)
1148 tbl
= fib_get_table(net
, table
);
1151 err
= fib_table_lookup(tbl
, &fl4
, &res
,
1152 FIB_LOOKUP_IGNORE_LINKSTATE
|
1155 /* on error or if no table given do full lookup. This
1156 * is needed for example when nexthops are in the local
1157 * table rather than the given table
1160 err
= fib_lookup(net
, &fl4
, &res
,
1161 FIB_LOOKUP_IGNORE_LINKSTATE
);
1165 NL_SET_ERR_MSG(extack
, "Nexthop has invalid gateway");
1171 if (res
.type
!= RTN_UNICAST
&& res
.type
!= RTN_LOCAL
) {
1172 NL_SET_ERR_MSG(extack
, "Nexthop has invalid gateway");
1175 nh
->fib_nh_scope
= res
.scope
;
1176 nh
->fib_nh_oif
= FIB_RES_OIF(res
);
1177 nh
->fib_nh_dev
= dev
= FIB_RES_DEV(res
);
1179 NL_SET_ERR_MSG(extack
,
1180 "No egress device for nexthop gateway");
1183 netdev_hold(dev
, &nh
->fib_nh_dev_tracker
, GFP_ATOMIC
);
1184 if (!netif_carrier_ok(dev
))
1185 nh
->fib_nh_flags
|= RTNH_F_LINKDOWN
;
1186 err
= (dev
->flags
& IFF_UP
) ? 0 : -ENETDOWN
;
1192 static int fib_check_nh_nongw(struct net
*net
, struct fib_nh
*nh
,
1193 struct netlink_ext_ack
*extack
)
1195 struct in_device
*in_dev
;
1198 if (nh
->fib_nh_flags
& (RTNH_F_PERVASIVE
| RTNH_F_ONLINK
)) {
1199 NL_SET_ERR_MSG(extack
,
1200 "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
1207 in_dev
= inetdev_by_index(net
, nh
->fib_nh_oif
);
1211 if (!(in_dev
->dev
->flags
& IFF_UP
)) {
1212 NL_SET_ERR_MSG(extack
, "Device for nexthop is not up");
1216 nh
->fib_nh_dev
= in_dev
->dev
;
1217 netdev_hold(nh
->fib_nh_dev
, &nh
->fib_nh_dev_tracker
, GFP_ATOMIC
);
1218 nh
->fib_nh_scope
= RT_SCOPE_HOST
;
1219 if (!netif_carrier_ok(nh
->fib_nh_dev
))
1220 nh
->fib_nh_flags
|= RTNH_F_LINKDOWN
;
1227 int fib_check_nh(struct net
*net
, struct fib_nh
*nh
, u32 table
, u8 scope
,
1228 struct netlink_ext_ack
*extack
)
1232 if (nh
->fib_nh_gw_family
== AF_INET
)
1233 err
= fib_check_nh_v4_gw(net
, nh
, table
, scope
, extack
);
1234 else if (nh
->fib_nh_gw_family
== AF_INET6
)
1235 err
= fib_check_nh_v6_gw(net
, nh
, table
, extack
);
1237 err
= fib_check_nh_nongw(net
, nh
, extack
);
1242 static struct hlist_head
*
1243 fib_info_laddrhash_bucket(const struct net
*net
, __be32 val
)
1245 u32 slot
= hash_32(net_hash_mix(net
) ^ (__force u32
)val
,
1246 fib_info_hash_bits
);
1248 return &fib_info_laddrhash
[slot
];
1251 static void fib_info_hash_move(struct hlist_head
*new_info_hash
,
1252 struct hlist_head
*new_laddrhash
,
1253 unsigned int new_size
)
1255 struct hlist_head
*old_info_hash
, *old_laddrhash
;
1256 unsigned int old_size
= fib_info_hash_size
;
1260 old_info_hash
= fib_info_hash
;
1261 old_laddrhash
= fib_info_laddrhash
;
1262 fib_info_hash_size
= new_size
;
1263 fib_info_hash_bits
= ilog2(new_size
);
1265 for (i
= 0; i
< old_size
; i
++) {
1266 struct hlist_head
*head
= &fib_info_hash
[i
];
1267 struct hlist_node
*n
;
1268 struct fib_info
*fi
;
1270 hlist_for_each_entry_safe(fi
, n
, head
, fib_hash
) {
1271 struct hlist_head
*dest
;
1272 unsigned int new_hash
;
1274 new_hash
= fib_info_hashfn(fi
);
1275 dest
= &new_info_hash
[new_hash
];
1276 hlist_add_head(&fi
->fib_hash
, dest
);
1279 fib_info_hash
= new_info_hash
;
1281 fib_info_laddrhash
= new_laddrhash
;
1282 for (i
= 0; i
< old_size
; i
++) {
1283 struct hlist_head
*lhead
= &old_laddrhash
[i
];
1284 struct hlist_node
*n
;
1285 struct fib_info
*fi
;
1287 hlist_for_each_entry_safe(fi
, n
, lhead
, fib_lhash
) {
1288 struct hlist_head
*ldest
;
1290 ldest
= fib_info_laddrhash_bucket(fi
->fib_net
,
1292 hlist_add_head(&fi
->fib_lhash
, ldest
);
1296 kvfree(old_info_hash
);
1297 kvfree(old_laddrhash
);
1300 __be32
fib_info_update_nhc_saddr(struct net
*net
, struct fib_nh_common
*nhc
,
1301 unsigned char scope
)
1306 if (nhc
->nhc_family
!= AF_INET
)
1307 return inet_select_addr(nhc
->nhc_dev
, 0, scope
);
1309 nh
= container_of(nhc
, struct fib_nh
, nh_common
);
1310 saddr
= inet_select_addr(nh
->fib_nh_dev
, nh
->fib_nh_gw4
, scope
);
1312 WRITE_ONCE(nh
->nh_saddr
, saddr
);
1313 WRITE_ONCE(nh
->nh_saddr_genid
, atomic_read(&net
->ipv4
.dev_addr_genid
));
1318 __be32
fib_result_prefsrc(struct net
*net
, struct fib_result
*res
)
1320 struct fib_nh_common
*nhc
= res
->nhc
;
1322 if (res
->fi
->fib_prefsrc
)
1323 return res
->fi
->fib_prefsrc
;
1325 if (nhc
->nhc_family
== AF_INET
) {
1328 nh
= container_of(nhc
, struct fib_nh
, nh_common
);
1329 if (READ_ONCE(nh
->nh_saddr_genid
) ==
1330 atomic_read(&net
->ipv4
.dev_addr_genid
))
1331 return READ_ONCE(nh
->nh_saddr
);
1334 return fib_info_update_nhc_saddr(net
, nhc
, res
->fi
->fib_scope
);
1337 static bool fib_valid_prefsrc(struct fib_config
*cfg
, __be32 fib_prefsrc
)
1339 if (cfg
->fc_type
!= RTN_LOCAL
|| !cfg
->fc_dst
||
1340 fib_prefsrc
!= cfg
->fc_dst
) {
1341 u32 tb_id
= cfg
->fc_table
;
1344 if (tb_id
== RT_TABLE_MAIN
)
1345 tb_id
= RT_TABLE_LOCAL
;
1347 rc
= inet_addr_type_table(cfg
->fc_nlinfo
.nl_net
,
1348 fib_prefsrc
, tb_id
);
1350 if (rc
!= RTN_LOCAL
&& tb_id
!= RT_TABLE_LOCAL
) {
1351 rc
= inet_addr_type_table(cfg
->fc_nlinfo
.nl_net
,
1352 fib_prefsrc
, RT_TABLE_LOCAL
);
1355 if (rc
!= RTN_LOCAL
)
1361 struct fib_info
*fib_create_info(struct fib_config
*cfg
,
1362 struct netlink_ext_ack
*extack
)
1365 struct fib_info
*fi
= NULL
;
1366 struct nexthop
*nh
= NULL
;
1367 struct fib_info
*ofi
;
1369 struct net
*net
= cfg
->fc_nlinfo
.nl_net
;
1372 if (cfg
->fc_type
> RTN_MAX
)
1375 /* Fast check to catch the most weird cases */
1376 if (fib_props
[cfg
->fc_type
].scope
> cfg
->fc_scope
) {
1377 NL_SET_ERR_MSG(extack
, "Invalid scope");
1381 if (cfg
->fc_flags
& (RTNH_F_DEAD
| RTNH_F_LINKDOWN
)) {
1382 NL_SET_ERR_MSG(extack
,
1383 "Invalid rtm_flags - can not contain DEAD or LINKDOWN");
1387 if (cfg
->fc_nh_id
) {
1389 fi
= fib_find_info_nh(net
, cfg
);
1391 refcount_inc(&fi
->fib_treeref
);
1396 nh
= nexthop_find_by_id(net
, cfg
->fc_nh_id
);
1398 NL_SET_ERR_MSG(extack
, "Nexthop id does not exist");
1404 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1406 nhs
= fib_count_nexthops(cfg
->fc_mp
, cfg
->fc_mp_len
, extack
);
1414 if (fib_info_cnt
>= fib_info_hash_size
) {
1415 unsigned int new_size
= fib_info_hash_size
<< 1;
1416 struct hlist_head
*new_info_hash
;
1417 struct hlist_head
*new_laddrhash
;
1422 bytes
= (size_t)new_size
* sizeof(struct hlist_head
*);
1423 new_info_hash
= kvzalloc(bytes
, GFP_KERNEL
);
1424 new_laddrhash
= kvzalloc(bytes
, GFP_KERNEL
);
1425 if (!new_info_hash
|| !new_laddrhash
) {
1426 kvfree(new_info_hash
);
1427 kvfree(new_laddrhash
);
1429 fib_info_hash_move(new_info_hash
, new_laddrhash
, new_size
);
1431 if (!fib_info_hash_size
)
1435 fi
= kzalloc(struct_size(fi
, fib_nh
, nhs
), GFP_KERNEL
);
1438 fi
->fib_metrics
= ip_fib_metrics_init(cfg
->fc_mx
, cfg
->fc_mx_len
, extack
);
1439 if (IS_ERR(fi
->fib_metrics
)) {
1440 err
= PTR_ERR(fi
->fib_metrics
);
1442 return ERR_PTR(err
);
1446 fi
->fib_protocol
= cfg
->fc_protocol
;
1447 fi
->fib_scope
= cfg
->fc_scope
;
1448 fi
->fib_flags
= cfg
->fc_flags
;
1449 fi
->fib_priority
= cfg
->fc_priority
;
1450 fi
->fib_prefsrc
= cfg
->fc_prefsrc
;
1451 fi
->fib_type
= cfg
->fc_type
;
1452 fi
->fib_tb_id
= cfg
->fc_table
;
1456 if (!nexthop_get(nh
)) {
1457 NL_SET_ERR_MSG(extack
, "Nexthop has been deleted");
1464 change_nexthops(fi
) {
1465 nexthop_nh
->nh_parent
= fi
;
1466 } endfor_nexthops(fi
)
1469 err
= fib_get_nhs(fi
, cfg
->fc_mp
, cfg
->fc_mp_len
, cfg
,
1472 err
= fib_nh_init(net
, fi
->fib_nh
, cfg
, 1, extack
);
1478 if (fib_props
[cfg
->fc_type
].error
) {
1479 if (cfg
->fc_gw_family
|| cfg
->fc_oif
|| cfg
->fc_mp
) {
1480 NL_SET_ERR_MSG(extack
,
1481 "Gateway, device and multipath can not be specified for this route type");
1486 switch (cfg
->fc_type
) {
1494 NL_SET_ERR_MSG(extack
, "Invalid route type");
1499 if (cfg
->fc_scope
> RT_SCOPE_HOST
) {
1500 NL_SET_ERR_MSG(extack
, "Invalid scope");
1505 err
= fib_check_nexthop(fi
->nh
, cfg
->fc_scope
, extack
);
1508 } else if (cfg
->fc_scope
== RT_SCOPE_HOST
) {
1509 struct fib_nh
*nh
= fi
->fib_nh
;
1511 /* Local address is added. */
1513 NL_SET_ERR_MSG(extack
,
1514 "Route with host scope can not have multiple nexthops");
1517 if (nh
->fib_nh_gw_family
) {
1518 NL_SET_ERR_MSG(extack
,
1519 "Route with host scope can not have a gateway");
1522 nh
->fib_nh_scope
= RT_SCOPE_NOWHERE
;
1523 nh
->fib_nh_dev
= dev_get_by_index(net
, nh
->fib_nh_oif
);
1525 if (!nh
->fib_nh_dev
)
1527 netdev_tracker_alloc(nh
->fib_nh_dev
, &nh
->fib_nh_dev_tracker
,
1532 change_nexthops(fi
) {
1533 err
= fib_check_nh(cfg
->fc_nlinfo
.nl_net
, nexthop_nh
,
1534 cfg
->fc_table
, cfg
->fc_scope
,
1538 if (nexthop_nh
->fib_nh_flags
& RTNH_F_LINKDOWN
)
1540 } endfor_nexthops(fi
)
1541 if (linkdown
== fi
->fib_nhs
)
1542 fi
->fib_flags
|= RTNH_F_LINKDOWN
;
1545 if (fi
->fib_prefsrc
&& !fib_valid_prefsrc(cfg
, fi
->fib_prefsrc
)) {
1546 NL_SET_ERR_MSG(extack
, "Invalid prefsrc address");
1551 change_nexthops(fi
) {
1552 fib_info_update_nhc_saddr(net
, &nexthop_nh
->nh_common
,
1554 if (nexthop_nh
->fib_nh_gw_family
== AF_INET6
)
1555 fi
->fib_nh_is_v6
= true;
1556 } endfor_nexthops(fi
)
1562 ofi
= fib_find_info(fi
);
1564 /* fib_table_lookup() should not see @fi yet. */
1567 refcount_inc(&ofi
->fib_treeref
);
1571 refcount_set(&fi
->fib_treeref
, 1);
1572 refcount_set(&fi
->fib_clntref
, 1);
1575 hlist_add_head(&fi
->fib_hash
,
1576 &fib_info_hash
[fib_info_hashfn(fi
)]);
1577 if (fi
->fib_prefsrc
) {
1578 struct hlist_head
*head
;
1580 head
= fib_info_laddrhash_bucket(net
, fi
->fib_prefsrc
);
1581 hlist_add_head(&fi
->fib_lhash
, head
);
1584 list_add(&fi
->nh_list
, &nh
->fi_list
);
1586 change_nexthops(fi
) {
1587 struct hlist_head
*head
;
1589 if (!nexthop_nh
->fib_nh_dev
)
1591 head
= fib_nh_head(nexthop_nh
->fib_nh_dev
);
1592 hlist_add_head_rcu(&nexthop_nh
->nh_hash
, head
);
1593 } endfor_nexthops(fi
)
1602 /* fib_table_lookup() should not see @fi yet. */
1607 return ERR_PTR(err
);
1610 int fib_nexthop_info(struct sk_buff
*skb
, const struct fib_nh_common
*nhc
,
1611 u8 rt_family
, unsigned char *flags
, bool skip_oif
)
1613 if (nhc
->nhc_flags
& RTNH_F_DEAD
)
1614 *flags
|= RTNH_F_DEAD
;
1616 if (nhc
->nhc_flags
& RTNH_F_LINKDOWN
) {
1617 *flags
|= RTNH_F_LINKDOWN
;
1620 switch (nhc
->nhc_family
) {
1622 if (ip_ignore_linkdown(nhc
->nhc_dev
))
1623 *flags
|= RTNH_F_DEAD
;
1626 if (ip6_ignore_linkdown(nhc
->nhc_dev
))
1627 *flags
|= RTNH_F_DEAD
;
1633 switch (nhc
->nhc_gw_family
) {
1635 if (nla_put_in_addr(skb
, RTA_GATEWAY
, nhc
->nhc_gw
.ipv4
))
1636 goto nla_put_failure
;
1639 /* if gateway family does not match nexthop family
1640 * gateway is encoded as RTA_VIA
1642 if (rt_family
!= nhc
->nhc_gw_family
) {
1643 int alen
= sizeof(struct in6_addr
);
1647 nla
= nla_reserve(skb
, RTA_VIA
, alen
+ 2);
1649 goto nla_put_failure
;
1651 via
= nla_data(nla
);
1652 via
->rtvia_family
= AF_INET6
;
1653 memcpy(via
->rtvia_addr
, &nhc
->nhc_gw
.ipv6
, alen
);
1654 } else if (nla_put_in6_addr(skb
, RTA_GATEWAY
,
1655 &nhc
->nhc_gw
.ipv6
) < 0) {
1656 goto nla_put_failure
;
1661 *flags
|= (nhc
->nhc_flags
&
1662 (RTNH_F_ONLINK
| RTNH_F_OFFLOAD
| RTNH_F_TRAP
));
1664 if (!skip_oif
&& nhc
->nhc_dev
&&
1665 nla_put_u32(skb
, RTA_OIF
, nhc
->nhc_dev
->ifindex
))
1666 goto nla_put_failure
;
1668 if (nhc
->nhc_lwtstate
&&
1669 lwtunnel_fill_encap(skb
, nhc
->nhc_lwtstate
,
1670 RTA_ENCAP
, RTA_ENCAP_TYPE
) < 0)
1671 goto nla_put_failure
;
1678 EXPORT_SYMBOL_GPL(fib_nexthop_info
);
1680 #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
1681 int fib_add_nexthop(struct sk_buff
*skb
, const struct fib_nh_common
*nhc
,
1682 int nh_weight
, u8 rt_family
, u32 nh_tclassid
)
1684 const struct net_device
*dev
= nhc
->nhc_dev
;
1685 struct rtnexthop
*rtnh
;
1686 unsigned char flags
= 0;
1688 rtnh
= nla_reserve_nohdr(skb
, sizeof(*rtnh
));
1690 goto nla_put_failure
;
1692 rtnh
->rtnh_hops
= nh_weight
- 1;
1693 rtnh
->rtnh_ifindex
= dev
? dev
->ifindex
: 0;
1695 if (fib_nexthop_info(skb
, nhc
, rt_family
, &flags
, true) < 0)
1696 goto nla_put_failure
;
1698 rtnh
->rtnh_flags
= flags
;
1700 if (nh_tclassid
&& nla_put_u32(skb
, RTA_FLOW
, nh_tclassid
))
1701 goto nla_put_failure
;
1703 /* length of rtnetlink header + attributes */
1704 rtnh
->rtnh_len
= nlmsg_get_pos(skb
) - (void *)rtnh
;
1711 EXPORT_SYMBOL_GPL(fib_add_nexthop
);
1714 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1715 static int fib_add_multipath(struct sk_buff
*skb
, struct fib_info
*fi
)
1719 mp
= nla_nest_start_noflag(skb
, RTA_MULTIPATH
);
1721 goto nla_put_failure
;
1723 if (unlikely(fi
->nh
)) {
1724 if (nexthop_mpath_fill_node(skb
, fi
->nh
, AF_INET
) < 0)
1725 goto nla_put_failure
;
1730 u32 nh_tclassid
= 0;
1731 #ifdef CONFIG_IP_ROUTE_CLASSID
1732 nh_tclassid
= nh
->nh_tclassid
;
1734 if (fib_add_nexthop(skb
, &nh
->nh_common
, nh
->fib_nh_weight
,
1735 AF_INET
, nh_tclassid
) < 0)
1736 goto nla_put_failure
;
1737 } endfor_nexthops(fi
);
1740 nla_nest_end(skb
, mp
);
1748 static int fib_add_multipath(struct sk_buff
*skb
, struct fib_info
*fi
)
1754 int fib_dump_info(struct sk_buff
*skb
, u32 portid
, u32 seq
, int event
,
1755 const struct fib_rt_info
*fri
, unsigned int flags
)
1757 unsigned int nhs
= fib_info_num_path(fri
->fi
);
1758 struct fib_info
*fi
= fri
->fi
;
1759 u32 tb_id
= fri
->tb_id
;
1760 struct nlmsghdr
*nlh
;
1763 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*rtm
), flags
);
1767 rtm
= nlmsg_data(nlh
);
1768 rtm
->rtm_family
= AF_INET
;
1769 rtm
->rtm_dst_len
= fri
->dst_len
;
1770 rtm
->rtm_src_len
= 0;
1771 rtm
->rtm_tos
= inet_dscp_to_dsfield(fri
->dscp
);
1773 rtm
->rtm_table
= tb_id
;
1775 rtm
->rtm_table
= RT_TABLE_COMPAT
;
1776 if (nla_put_u32(skb
, RTA_TABLE
, tb_id
))
1777 goto nla_put_failure
;
1778 rtm
->rtm_type
= fri
->type
;
1779 rtm
->rtm_flags
= fi
->fib_flags
;
1780 rtm
->rtm_scope
= fi
->fib_scope
;
1781 rtm
->rtm_protocol
= fi
->fib_protocol
;
1783 if (rtm
->rtm_dst_len
&&
1784 nla_put_in_addr(skb
, RTA_DST
, fri
->dst
))
1785 goto nla_put_failure
;
1786 if (fi
->fib_priority
&&
1787 nla_put_u32(skb
, RTA_PRIORITY
, fi
->fib_priority
))
1788 goto nla_put_failure
;
1789 if (rtnetlink_put_metrics(skb
, fi
->fib_metrics
->metrics
) < 0)
1790 goto nla_put_failure
;
1792 if (fi
->fib_prefsrc
&&
1793 nla_put_in_addr(skb
, RTA_PREFSRC
, fi
->fib_prefsrc
))
1794 goto nla_put_failure
;
1797 if (nla_put_u32(skb
, RTA_NH_ID
, fi
->nh
->id
))
1798 goto nla_put_failure
;
1799 if (nexthop_is_blackhole(fi
->nh
))
1800 rtm
->rtm_type
= RTN_BLACKHOLE
;
1801 if (!READ_ONCE(fi
->fib_net
->ipv4
.sysctl_nexthop_compat_mode
))
1806 const struct fib_nh_common
*nhc
= fib_info_nhc(fi
, 0);
1807 unsigned char flags
= 0;
1809 if (fib_nexthop_info(skb
, nhc
, AF_INET
, &flags
, false) < 0)
1810 goto nla_put_failure
;
1812 rtm
->rtm_flags
= flags
;
1813 #ifdef CONFIG_IP_ROUTE_CLASSID
1814 if (nhc
->nhc_family
== AF_INET
) {
1817 nh
= container_of(nhc
, struct fib_nh
, nh_common
);
1818 if (nh
->nh_tclassid
&&
1819 nla_put_u32(skb
, RTA_FLOW
, nh
->nh_tclassid
))
1820 goto nla_put_failure
;
1824 if (fib_add_multipath(skb
, fi
) < 0)
1825 goto nla_put_failure
;
1830 rtm
->rtm_flags
|= RTM_F_OFFLOAD
;
1832 rtm
->rtm_flags
|= RTM_F_TRAP
;
1833 if (fri
->offload_failed
)
1834 rtm
->rtm_flags
|= RTM_F_OFFLOAD_FAILED
;
1836 nlmsg_end(skb
, nlh
);
1840 nlmsg_cancel(skb
, nlh
);
/*
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
1850 int fib_sync_down_addr(struct net_device
*dev
, __be32 local
)
1852 int tb_id
= l3mdev_fib_table(dev
) ? : RT_TABLE_MAIN
;
1853 struct net
*net
= dev_net(dev
);
1854 struct hlist_head
*head
;
1855 struct fib_info
*fi
;
1858 if (!fib_info_laddrhash
|| local
== 0)
1861 head
= fib_info_laddrhash_bucket(net
, local
);
1862 hlist_for_each_entry(fi
, head
, fib_lhash
) {
1863 if (!net_eq(fi
->fib_net
, net
) ||
1864 fi
->fib_tb_id
!= tb_id
)
1866 if (fi
->fib_prefsrc
== local
) {
1867 fi
->fib_flags
|= RTNH_F_DEAD
;
1868 fi
->pfsrc_removed
= true;
1875 static int call_fib_nh_notifiers(struct fib_nh
*nh
,
1876 enum fib_event_type event_type
)
1878 bool ignore_link_down
= ip_ignore_linkdown(nh
->fib_nh_dev
);
1879 struct fib_nh_notifier_info info
= {
1883 switch (event_type
) {
1884 case FIB_EVENT_NH_ADD
:
1885 if (nh
->fib_nh_flags
& RTNH_F_DEAD
)
1887 if (ignore_link_down
&& nh
->fib_nh_flags
& RTNH_F_LINKDOWN
)
1889 return call_fib4_notifiers(dev_net(nh
->fib_nh_dev
), event_type
,
1891 case FIB_EVENT_NH_DEL
:
1892 if ((ignore_link_down
&& nh
->fib_nh_flags
& RTNH_F_LINKDOWN
) ||
1893 (nh
->fib_nh_flags
& RTNH_F_DEAD
))
1894 return call_fib4_notifiers(dev_net(nh
->fib_nh_dev
),
1895 event_type
, &info
.info
);
/* Update the PMTU of exceptions when:
 * - the new MTU of the first hop becomes smaller than the PMTU
 * - the old MTU was the same as the PMTU, and it limited discovery of
 *   larger MTUs on the path. With that limit raised, we can now
 *   discover larger MTUs
 * A special case is locked exceptions, for which the PMTU is smaller
 * than the minimal accepted PMTU:
 * - if the new MTU is greater than the PMTU, don't make any change
 * - otherwise, unlock and set PMTU
 */
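/* Example of the rules above: an exception learned PMTU 1400 while the
 * first-hop device MTU was 1400; if the device MTU is raised to 1500 the
 * exception is bumped as well (the old MTU was what limited discovery), and
 * if the device MTU later drops to 1280 the exception is lowered to 1280.
 */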
1914 void fib_nhc_update_mtu(struct fib_nh_common
*nhc
, u32
new, u32 orig
)
1916 struct fnhe_hash_bucket
*bucket
;
1919 bucket
= rcu_dereference_protected(nhc
->nhc_exceptions
, 1);
1923 for (i
= 0; i
< FNHE_HASH_SIZE
; i
++) {
1924 struct fib_nh_exception
*fnhe
;
1926 for (fnhe
= rcu_dereference_protected(bucket
[i
].chain
, 1);
1928 fnhe
= rcu_dereference_protected(fnhe
->fnhe_next
, 1)) {
1929 if (fnhe
->fnhe_mtu_locked
) {
1930 if (new <= fnhe
->fnhe_pmtu
) {
1931 fnhe
->fnhe_pmtu
= new;
1932 fnhe
->fnhe_mtu_locked
= false;
1934 } else if (new < fnhe
->fnhe_pmtu
||
1935 orig
== fnhe
->fnhe_pmtu
) {
1936 fnhe
->fnhe_pmtu
= new;
1942 void fib_sync_mtu(struct net_device
*dev
, u32 orig_mtu
)
1944 struct hlist_head
*head
= fib_nh_head(dev
);
1947 hlist_for_each_entry(nh
, head
, nh_hash
) {
1948 DEBUG_NET_WARN_ON_ONCE(nh
->fib_nh_dev
!= dev
);
1949 fib_nhc_update_mtu(&nh
->nh_common
, dev
->mtu
, orig_mtu
);
/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 *
 * only used when fib_nh is built into fib_info
 */
1961 int fib_sync_down_dev(struct net_device
*dev
, unsigned long event
, bool force
)
1963 struct hlist_head
*head
= fib_nh_head(dev
);
1964 struct fib_info
*prev_fi
= NULL
;
1965 int scope
= RT_SCOPE_NOWHERE
;
1972 hlist_for_each_entry(nh
, head
, nh_hash
) {
1973 struct fib_info
*fi
= nh
->nh_parent
;
1976 BUG_ON(!fi
->fib_nhs
);
1977 DEBUG_NET_WARN_ON_ONCE(nh
->fib_nh_dev
!= dev
);
1982 change_nexthops(fi
) {
1983 if (nexthop_nh
->fib_nh_flags
& RTNH_F_DEAD
)
1985 else if (nexthop_nh
->fib_nh_dev
== dev
&&
1986 nexthop_nh
->fib_nh_scope
!= scope
) {
1989 case NETDEV_UNREGISTER
:
1990 nexthop_nh
->fib_nh_flags
|= RTNH_F_DEAD
;
1993 nexthop_nh
->fib_nh_flags
|= RTNH_F_LINKDOWN
;
1996 call_fib_nh_notifiers(nexthop_nh
,
2000 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2001 if (event
== NETDEV_UNREGISTER
&&
2002 nexthop_nh
->fib_nh_dev
== dev
) {
2007 } endfor_nexthops(fi
)
2008 if (dead
== fi
->fib_nhs
) {
2011 case NETDEV_UNREGISTER
:
2012 fi
->fib_flags
|= RTNH_F_DEAD
;
2015 fi
->fib_flags
|= RTNH_F_LINKDOWN
;
2027 /* Must be invoked inside of an RCU protected region. */
2028 static void fib_select_default(const struct flowi4
*flp
, struct fib_result
*res
)
2030 struct fib_info
*fi
= NULL
, *last_resort
= NULL
;
2031 struct hlist_head
*fa_head
= res
->fa_head
;
2032 struct fib_table
*tb
= res
->table
;
2033 u8 slen
= 32 - res
->prefixlen
;
2034 int order
= -1, last_idx
= -1;
2035 struct fib_alias
*fa
, *fa1
= NULL
;
2036 u32 last_prio
= res
->fi
->fib_priority
;
2037 dscp_t last_dscp
= 0;
2039 hlist_for_each_entry_rcu(fa
, fa_head
, fa_list
) {
2040 struct fib_info
*next_fi
= fa
->fa_info
;
2041 struct fib_nh_common
*nhc
;
2043 if (fa
->fa_slen
!= slen
)
2045 if (fa
->fa_dscp
&& !fib_dscp_masked_match(fa
->fa_dscp
, flp
))
2047 if (fa
->tb_id
!= tb
->tb_id
)
2049 if (next_fi
->fib_priority
> last_prio
&&
2050 fa
->fa_dscp
== last_dscp
) {
2055 if (next_fi
->fib_flags
& RTNH_F_DEAD
)
2057 last_dscp
= fa
->fa_dscp
;
2058 last_prio
= next_fi
->fib_priority
;
2060 if (next_fi
->fib_scope
!= res
->scope
||
2061 fa
->fa_type
!= RTN_UNICAST
)
2064 nhc
= fib_info_nhc(next_fi
, 0);
2065 if (!nhc
->nhc_gw_family
|| nhc
->nhc_scope
!= RT_SCOPE_LINK
)
2068 fib_alias_accessed(fa
);
2071 if (next_fi
!= res
->fi
)
2074 } else if (!fib_detect_death(fi
, order
, &last_resort
,
2075 &last_idx
, fa1
->fa_default
)) {
2076 fib_result_assign(res
, fi
);
2077 fa1
->fa_default
= order
;
2084 if (order
<= 0 || !fi
) {
2086 fa1
->fa_default
= -1;
2090 if (!fib_detect_death(fi
, order
, &last_resort
, &last_idx
,
2092 fib_result_assign(res
, fi
);
2093 fa1
->fa_default
= order
;
2098 fib_result_assign(res
, last_resort
);
2099 fa1
->fa_default
= last_idx
;
/*
 * Dead device goes up. We wake up dead nexthops.
 * It makes sense only on multipath routes.
 *
 * only used when fib_nh is built into fib_info
 */
2110 int fib_sync_up(struct net_device
*dev
, unsigned char nh_flags
)
2112 struct fib_info
*prev_fi
;
2113 struct hlist_head
*head
;
2117 if (!(dev
->flags
& IFF_UP
))
2120 if (nh_flags
& RTNH_F_DEAD
) {
2121 unsigned int flags
= dev_get_flags(dev
);
2123 if (flags
& (IFF_RUNNING
| IFF_LOWER_UP
))
2124 nh_flags
|= RTNH_F_LINKDOWN
;
2128 head
= fib_nh_head(dev
);
2131 hlist_for_each_entry(nh
, head
, nh_hash
) {
2132 struct fib_info
*fi
= nh
->nh_parent
;
2135 BUG_ON(!fi
->fib_nhs
);
2136 DEBUG_NET_WARN_ON_ONCE(nh
->fib_nh_dev
!= dev
);
2142 change_nexthops(fi
) {
2143 if (!(nexthop_nh
->fib_nh_flags
& nh_flags
)) {
2147 if (!nexthop_nh
->fib_nh_dev
||
2148 !(nexthop_nh
->fib_nh_dev
->flags
& IFF_UP
))
2150 if (nexthop_nh
->fib_nh_dev
!= dev
||
2151 !__in_dev_get_rtnl(dev
))
2154 nexthop_nh
->fib_nh_flags
&= ~nh_flags
;
2155 call_fib_nh_notifiers(nexthop_nh
, FIB_EVENT_NH_ADD
);
2156 } endfor_nexthops(fi
)
2159 fi
->fib_flags
&= ~nh_flags
;
2169 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2170 static bool fib_good_nh(const struct fib_nh
*nh
)
2172 int state
= NUD_REACHABLE
;
2174 if (nh
->fib_nh_scope
== RT_SCOPE_LINK
) {
2175 struct neighbour
*n
;
2179 if (likely(nh
->fib_nh_gw_family
== AF_INET
))
2180 n
= __ipv4_neigh_lookup_noref(nh
->fib_nh_dev
,
2181 (__force u32
)nh
->fib_nh_gw4
);
2182 else if (nh
->fib_nh_gw_family
== AF_INET6
)
2183 n
= __ipv6_neigh_lookup_noref_stub(nh
->fib_nh_dev
,
2188 state
= READ_ONCE(n
->nud_state
);
2193 return !!(state
& NUD_VALID
);
2196 void fib_select_multipath(struct fib_result
*res
, int hash
)
2198 struct fib_info
*fi
= res
->fi
;
2199 struct net
*net
= fi
->fib_net
;
2202 if (unlikely(res
->fi
->nh
)) {
2203 nexthop_path_fib_result(res
, hash
);
2207 change_nexthops(fi
) {
2208 if (READ_ONCE(net
->ipv4
.sysctl_fib_multipath_use_neigh
)) {
2209 if (!fib_good_nh(nexthop_nh
))
2212 res
->nh_sel
= nhsel
;
2213 res
->nhc
= &nexthop_nh
->nh_common
;
2218 if (hash
> atomic_read(&nexthop_nh
->fib_nh_upper_bound
))
2221 res
->nh_sel
= nhsel
;
2222 res
->nhc
= &nexthop_nh
->nh_common
;
2224 } endfor_nexthops(fi
);
2228 void fib_select_path(struct net
*net
, struct fib_result
*res
,
2229 struct flowi4
*fl4
, const struct sk_buff
*skb
)
2231 if (fl4
->flowi4_oif
)
2234 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2235 if (fib_info_num_path(res
->fi
) > 1) {
2236 int h
= fib_multipath_hash(net
, fl4
, skb
, NULL
);
2238 fib_select_multipath(res
, h
);
2242 if (!res
->prefixlen
&&
2243 res
->table
->tb_num_default
> 1 &&
2244 res
->type
== RTN_UNICAST
)
2245 fib_select_default(fl4
, res
);
2249 struct net_device
*l3mdev
;
2251 l3mdev
= dev_get_by_index_rcu(net
, fl4
->flowi4_l3mdev
);
2254 l3mdev_master_dev_rcu(FIB_RES_DEV(*res
)) == l3mdev
)
2255 fl4
->saddr
= fib_result_prefsrc(net
, res
);
2257 fl4
->saddr
= inet_select_addr(l3mdev
, 0, RT_SCOPE_LINK
);