// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/netlink.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/netlink.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>
#include <net/addrconf.h>

#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
/* for_nexthops and change_nexthops are only used when a nexthop object
 * is not set in the fib_info. The logic within can reference fib_nh.
 */
#ifdef CONFIG_IP_ROUTE_MULTIPATH

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < fib_info_num_path((fi));				\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < fib_info_num_path((fi));				\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope, that gcc will optimize it to get rid of dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
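
/* Illustrative sketch (not part of the original file): the macros above are
 * used as bracketed loops over the legacy nexthops embedded in a fib_info,
 * for example:
 *
 *	for_nexthops(fi) {
 *		if (nh->fib_nh_oif == oif)
 *			do_something(nh);
 *	} endfor_nexthops(fi);
 *
 * change_nexthops() is the same pattern with a mutable "nexthop_nh" cursor.
 */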
const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	if (!rt)
		return;

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */

	dst_dev_put(&rt->dst);
	dst_release_immediate(&rt->dst);
}
static void free_nh_exceptions(struct fib_nh_common *nhc)
{
	struct fnhe_hash_bucket *hash;
	int i;

	hash = rcu_dereference_protected(nhc->nhc_exceptions, 1);
	if (!hash)
		return;
	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);

			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}
static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt) {
			dst_dev_put(&rt->dst);
			dst_release_immediate(&rt->dst);
		}
	}
	free_percpu(rtp);
}
void fib_nh_common_release(struct fib_nh_common *nhc)
{
	if (nhc->nhc_dev)
		dev_put(nhc->nhc_dev);

	lwtstate_put(nhc->nhc_lwtstate);
	rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output);
	rt_fibinfo_free(&nhc->nhc_rth_input);
	free_nh_exceptions(nhc);
}
EXPORT_SYMBOL_GPL(fib_nh_common_release);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (fib_nh->nh_tclassid)
		net->ipv4.fib_num_tclassid_users--;
#endif
	fib_nh_common_release(&fib_nh->nh_common);
}
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	if (fi->nh) {
		nexthop_put(fi->nh);
	} else {
		change_nexthops(fi) {
			fib_nh_release(fi->fib_net, nexthop_nh);
		} endfor_nexthops(fi);
	}

	ip_fib_metrics_put(fi->fib_metrics);

	kfree(fi);
}
void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;

	call_rcu(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		if (fi->nh) {
			list_del(&fi->nh_list);
		} else {
			change_nexthops(fi) {
				if (!nexthop_nh->fib_nh_dev)
					continue;
				hlist_del(&nexthop_nh->nh_hash);
			} endfor_nexthops(fi)
		}
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}
static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi)
{
	const struct fib_nh *onh;

	if (fi->nh || ofi->nh)
		return nexthop_cmp(fi->nh, ofi->nh) ? 0 : -1;

	if (ofi->fib_nhs == 0)
		return 0;

	for_nexthops(fi) {
		onh = fib_info_nh(ofi, nhsel);

		if (nh->fib_nh_oif != onh->fib_nh_oif ||
		    nh->fib_nh_gw_family != onh->fib_nh_gw_family ||
		    nh->fib_nh_scope != onh->fib_nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->fib_nh_weight != onh->fib_nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) ||
		    ((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK))
			return -1;

		if (nh->fib_nh_gw_family == AF_INET &&
		    nh->fib_nh_gw4 != onh->fib_nh_gw4)
			return -1;

		if (nh->fib_nh_gw_family == AF_INET6 &&
		    ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6))
			return -1;
	} endfor_nexthops(fi);
	return 0;
}
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}
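
/* Worked example (illustrative only): with DEVINDEX_HASHBITS == 8, the fold
 * above maps ifindex 0x1234 to (0x1234 ^ 0x12 ^ 0x00) & 0xff == 0x26, so
 * device indexes are spread over the 256 fib_info_devhash buckets.
 */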
static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope,
				      u32 prefsrc, u32 priority)
{
	unsigned int val = init_val;

	val ^= (protocol << 8) | scope;
	val ^= prefsrc;
	val ^= priority;

	return val;
}
static unsigned int fib_info_hashfn_result(unsigned int val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
static inline unsigned int fib_info_hashfn(struct fib_info *fi)
{
	unsigned int val;

	val = fib_info_hashfn_1(fi->fib_nhs, fi->fib_protocol,
				fi->fib_scope, (__force u32)fi->fib_prefsrc,
				fi->fib_priority);

	if (fi->nh) {
		val ^= fib_devindex_hashfn(fi->nh->id);
	} else {
		for_nexthops(fi) {
			val ^= fib_devindex_hashfn(nh->fib_nh_oif);
		} endfor_nexthops(fi)
	}

	return fib_info_hashfn_result(val);
}
/* no metrics, only nexthop id */
static struct fib_info *fib_find_info_nh(struct net *net,
					 const struct fib_config *cfg)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn_1(fib_devindex_hashfn(cfg->fc_nh_id),
				 cfg->fc_protocol, cfg->fc_scope,
				 (__force u32)cfg->fc_prefsrc,
				 cfg->fc_priority);
	hash = fib_info_hashfn_result(hash);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (!fi->nh || fi->nh->id != cfg->fc_nh_id)
			continue;
		if (cfg->fc_protocol == fi->fib_protocol &&
		    cfg->fc_scope == fi->fib_scope &&
		    cfg->fc_prefsrc == fi->fib_prefsrc &&
		    cfg->fc_priority == fi->fib_priority &&
		    cfg->fc_type == fi->fib_type &&
		    cfg->fc_table == fi->fib_tb_id &&
		    !((cfg->fc_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK))
			return fi;
	}

	return NULL;
}
static struct fib_info *fib_find_info(struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
		    nh_comp(fi, nfi) == 0)
			return fi;
	}

	return NULL;
}
/* Check that the gateway is already configured.
 * Used only by redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->fib_nh_dev == dev &&
		    nh->fib_nh_gw4 == gw &&
		    !(nh->fib_nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4) /* RTA_PREFSRC */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
	unsigned int nhs = fib_info_num_path(fi);

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->nh)
		payload += nla_total_size(4); /* RTA_NH_ID */

	if (nhs) {
		size_t nh_encapsize = 0;
		/* Also handles the special case nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
		unsigned int i;

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* grab encap info */
		for (i = 0; i < fib_info_num_path(fi); i++) {
			struct fib_nh_common *nhc = fib_info_nhc(fi, i);

			if (nhc->nhc_lwtstate) {
				/* RTA_ENCAP_TYPE */
				nh_encapsize += lwtunnel_get_encap_size(
						nhc->nhc_lwtstate);
				/* RTA_ENCAP */
				nh_encapsize += nla_total_size(2);
			}
		}

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size((nhs * nhsize) + nh_encapsize);
	}

	return payload;
}
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct fib_rt_info fri;
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (!skb)
		goto errout;

	fri.fi = fa->fa_info;
	fri.tb_id = tb_id;
	fri.dst = key;
	fri.dst_len = dst_len;
	fri.tos = fa->fa_tos;
	fri.type = fa->fa_type;
	fri.offload = fa->offload;
	fri.trap = fa->trap;
	err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
			    int dflt)
{
	const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
	struct neighbour *n;
	int state = NUD_NONE;

	if (likely(nhc->nhc_gw_family == AF_INET))
		n = neigh_lookup(&arp_tbl, &nhc->nhc_gw.ipv4, nhc->nhc_dev);
	else if (nhc->nhc_gw_family == AF_INET6)
		n = neigh_lookup(ipv6_stub->nd_tbl, &nhc->nhc_gw.ipv6,
				 nhc->nhc_dev);
	else
		n = NULL;

	if (n) {
		state = n->nud_state;
		neigh_release(n);
	} else {
		return 0;
	}

	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}
int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
		       struct nlattr *encap, u16 encap_type,
		       void *cfg, gfp_t gfp_flags,
		       struct netlink_ext_ack *extack)
{
	int err;

	nhc->nhc_pcpu_rth_output = alloc_percpu_gfp(struct rtable __rcu *,
						    gfp_flags);
	if (!nhc->nhc_pcpu_rth_output)
		return -ENOMEM;

	if (encap) {
		struct lwtunnel_state *lwtstate;

		if (encap_type == LWTUNNEL_ENCAP_NONE) {
			NL_SET_ERR_MSG(extack, "LWT encap type not specified");
			err = -EINVAL;
			goto lwt_failure;
		}
		err = lwtunnel_build_state(net, encap_type, encap,
					   nhc->nhc_family, cfg, &lwtstate,
					   extack);
		if (err)
			goto lwt_failure;

		nhc->nhc_lwtstate = lwtstate_get(lwtstate);
	}

	return 0;

lwt_failure:
	rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output);
	nhc->nhc_pcpu_rth_output = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(fib_nh_common_init);
int fib_nh_init(struct net *net, struct fib_nh *nh,
		struct fib_config *cfg, int nh_weight,
		struct netlink_ext_ack *extack)
{
	int err;

	nh->fib_nh_family = AF_INET;

	err = fib_nh_common_init(net, &nh->nh_common, cfg->fc_encap,
				 cfg->fc_encap_type, cfg, GFP_KERNEL, extack);
	if (err)
		return err;

	nh->fib_nh_oif = cfg->fc_oif;
	nh->fib_nh_gw_family = cfg->fc_gw_family;
	if (cfg->fc_gw_family == AF_INET)
		nh->fib_nh_gw4 = cfg->fc_gw4;
	else if (cfg->fc_gw_family == AF_INET6)
		nh->fib_nh_gw6 = cfg->fc_gw6;

	nh->fib_nh_flags = cfg->fc_flags;

#ifdef CONFIG_IP_ROUTE_CLASSID
	nh->nh_tclassid = cfg->fc_flow;
	if (nh->nh_tclassid)
		net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	nh->fib_nh_weight = nh_weight;
#endif
	return 0;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
			      struct netlink_ext_ack *extack)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	if (remaining > 0) {
		NL_SET_ERR_MSG(extack,
			       "Invalid nexthop configuration - extra data after nexthops");
		nhs = 0;
	}

	return nhs;
}
/* only called when fib_nh is integrated into fib_info */
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct net *net = fi->fib_net;
	struct fib_config fib_cfg;
	struct fib_nh *nh;
	int ret;

	change_nexthops(fi) {
		int attrlen;

		memset(&fib_cfg, 0, sizeof(fib_cfg));

		if (!rtnh_ok(rtnh, remaining)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid nexthop configuration - extra data after nexthop");
			return -EINVAL;
		}

		if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid flags for nexthop - can not contain DEAD or LINKDOWN");
			return -EINVAL;
		}

		fib_cfg.fc_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		fib_cfg.fc_oif = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nlav = nla_find(attrs, attrlen, RTA_VIA);
			if (nla && nlav) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop configuration can not contain both GATEWAY and VIA");
				return -EINVAL;
			}
			if (nla) {
				fib_cfg.fc_gw4 = nla_get_in_addr(nla);
				if (fib_cfg.fc_gw4)
					fib_cfg.fc_gw_family = AF_INET;
			} else if (nlav) {
				ret = fib_gw_from_via(&fib_cfg, nlav, extack);
				if (ret)
					goto errout;
			}

			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla)
				fib_cfg.fc_flow = nla_get_u32(nla);

			fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				fib_cfg.fc_encap_type = nla_get_u16(nla);
		}

		ret = fib_nh_init(net, nexthop_nh, &fib_cfg,
				  rtnh->rtnh_hops + 1, extack);
		if (ret)
			goto errout;

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	ret = -EINVAL;
	nh = fib_info_nh(fi, 0);
	if (cfg->fc_oif && nh->fib_nh_oif != cfg->fc_oif) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop device index does not match RTA_OIF");
		goto errout;
	}
	if (cfg->fc_gw_family) {
		if (cfg->fc_gw_family != nh->fib_nh_gw_family ||
		    (cfg->fc_gw_family == AF_INET &&
		     nh->fib_nh_gw4 != cfg->fc_gw4) ||
		    (cfg->fc_gw_family == AF_INET6 &&
		     ipv6_addr_cmp(&nh->fib_nh_gw6, &cfg->fc_gw6))) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop gateway does not match RTA_GATEWAY or RTA_VIA");
			goto errout;
		}
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (cfg->fc_flow && nh->nh_tclassid != cfg->fc_flow) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop class id does not match RTA_FLOW");
		goto errout;
	}
#endif
	ret = 0;
errout:
	return ret;
}
/* only called when fib_nh is integrated into fib_info */
static void fib_rebalance(struct fib_info *fi)
{
	int total;
	int w;

	if (fib_info_num_path(fi) < 2)
		return;

	total = 0;
	for_nexthops(fi) {
		if (nh->fib_nh_flags & RTNH_F_DEAD)
			continue;

		if (ip_ignore_linkdown(nh->fib_nh_dev) &&
		    nh->fib_nh_flags & RTNH_F_LINKDOWN)
			continue;

		total += nh->fib_nh_weight;
	} endfor_nexthops(fi);

	w = 0;
	change_nexthops(fi) {
		int upper_bound;

		if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD) {
			upper_bound = -1;
		} else if (ip_ignore_linkdown(nexthop_nh->fib_nh_dev) &&
			   nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
			upper_bound = -1;
		} else {
			w += nexthop_nh->fib_nh_weight;
			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
							    total) - 1;
		}

		atomic_set(&nexthop_nh->fib_nh_upper_bound, upper_bound);
	} endfor_nexthops(fi);
}
#else /* CONFIG_IP_ROUTE_MULTIPATH */

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg,
		       struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "Multipath support not enabled in kernel");

	return -EINVAL;
}

#define fib_rebalance(fi) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int fib_encap_match(struct net *net, u16 encap_type,
			   struct nlattr *encap,
			   const struct fib_nh *nh,
			   const struct fib_config *cfg,
			   struct netlink_ext_ack *extack)
{
	struct lwtunnel_state *lwtstate;
	int ret, result = 0;

	if (encap_type == LWTUNNEL_ENCAP_NONE)
		return 0;

	ret = lwtunnel_build_state(net, encap_type, encap, AF_INET,
				   cfg, &lwtstate, extack);
	if (!ret) {
		result = lwtunnel_cmp_encap(lwtstate, nh->fib_nh_lws);
		lwtstate_free(lwtstate);
	}

	return result;
}
int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
		 struct netlink_ext_ack *extack)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_nh_id) {
		if (fi->nh && cfg->fc_nh_id == fi->nh->id)
			return 0;
		return 1;
	}

	if (cfg->fc_oif || cfg->fc_gw_family) {
		struct fib_nh *nh = fib_info_nh(fi, 0);

		if (cfg->fc_encap) {
			if (fib_encap_match(net, cfg->fc_encap_type,
					    cfg->fc_encap, nh, cfg, extack))
				return 1;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow &&
		    cfg->fc_flow != nh->nh_tclassid)
			return 1;
#endif
		if ((cfg->fc_oif && cfg->fc_oif != nh->fib_nh_oif) ||
		    (cfg->fc_gw_family &&
		     cfg->fc_gw_family != nh->fib_nh_gw_family))
			return 1;

		if (cfg->fc_gw_family == AF_INET &&
		    cfg->fc_gw4 != nh->fib_nh_gw4)
			return 1;

		if (cfg->fc_gw_family == AF_INET6 &&
		    ipv6_addr_cmp(&cfg->fc_gw6, &nh->fib_nh_gw6))
			return 1;

		return 0;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (!cfg->fc_mp)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->fib_nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nlav = nla_find(attrs, attrlen, RTA_VIA);
			if (nla && nlav) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop configuration can not contain both GATEWAY and VIA");
				return -EINVAL;
			}

			if (nla) {
				if (nh->fib_nh_gw_family != AF_INET ||
				    nla_get_in_addr(nla) != nh->fib_nh_gw4)
					return 1;
			} else if (nlav) {
				struct fib_config cfg2;
				int err;

				err = fib_gw_from_via(&cfg2, nlav, extack);
				if (err)
					return err;

				switch (nh->fib_nh_gw_family) {
				case AF_INET:
					if (cfg2.fc_gw_family != AF_INET ||
					    cfg2.fc_gw4 != nh->fib_nh_gw4)
						return 1;
					break;
				case AF_INET6:
					if (cfg2.fc_gw_family != AF_INET6 ||
					    ipv6_addr_cmp(&cfg2.fc_gw6,
							  &nh->fib_nh_gw6))
						return 1;
					break;
				}
			}

#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif

	return 0;
}
bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
{
	struct nlattr *nla;
	int remaining;

	if (!cfg->fc_mx)
		return true;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 fi_val, val;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return false;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];
			bool ecn_ca = false;

			nla_strscpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
		} else {
			if (nla_len(nla) != sizeof(u32))
				return false;
			val = nla_get_u32(nla);
		}

		fi_val = fi->fib_metrics->metrics[type - 1];
		if (type == RTAX_FEATURES)
			fi_val &= ~DST_FEATURE_ECN_CA;

		if (fi_val != val)
			return false;
	}

	return true;
}
static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh,
			      u32 table, struct netlink_ext_ack *extack)
{
	struct fib6_config cfg = {
		.fc_table = table,
		.fc_flags = nh->fib_nh_flags | RTF_GATEWAY,
		.fc_ifindex = nh->fib_nh_oif,
		.fc_gateway = nh->fib_nh_gw6,
	};
	struct fib6_nh fib6_nh = {};
	int err;

	err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack);
	if (!err) {
		nh->fib_nh_dev = fib6_nh.fib_nh_dev;
		dev_hold(nh->fib_nh_dev);
		nh->fib_nh_oif = nh->fib_nh_dev->ifindex;
		nh->fib_nh_scope = RT_SCOPE_LINK;

		ipv6_stub->fib6_nh_release(&fib6_nh);
	}

	return err;
}
/*
 * Picture
 * -------
 *
 *  The semantics of a nexthop are messy for historical reasons.
 *  We have to take into account that:
 *  a) gateway can actually be a local interface address,
 *     so that a gatewayed route is direct.
 *  b) gateway must be an on-link address, possibly
 *     described not by an ifaddr, but also by a direct route.
 *  c) If both gateway and interface are specified, they should not
 *     contradict.
 *  d) If we use tunnel routes, the gateway could be off-link.
 *
 *  Attempting to reconcile all of these (alas, self-contradictory) conditions
 *  results in pretty ugly and hairy code with obscure logic.
 *
 *  I chose to generalize it instead, so that the size
 *  of the code does not increase much in practice, but it becomes
 *  much more general.
 *  Every prefix is assigned a "scope" value: "host" is a local address,
 *  "link" is a direct route,
 *  [ ... "site" ... "interior" ... ]
 *  and "universe" is a true gateway route with global meaning.
 *
 *  Every prefix refers to a set of "nexthop"s (gw, oif),
 *  where gw must have narrower scope. This recursion stops
 *  when gw has LOCAL scope or if the "nexthop" is declared ONLINK,
 *  which means that gw is forced to be on link.
 *
 *  The code is still hairy, but now it is apparently logically
 *  consistent and very flexible. E.g. as a by-product it allows
 *  independent exterior and interior routing processes to coexist
 *  in peace.
 *
 *  Normally it looks like the following.
 *
 *	{universe prefix}  -> (gw, oif) [scope link]
 *			  |
 *			  |-> {link prefix} -> (gw, oif) [scope local]
 *						|
 *						|-> {local prefix} (terminal node)
 */
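
/* Concrete example (illustrative, not from the original source):
 *
 *	default via 192.0.2.1 dev eth0		scope universe
 *	192.0.2.0/24 dev eth0 proto kernel	scope link
 *	local 192.0.2.10 dev eth0		scope host
 *
 * The gateway 192.0.2.1 of the universe-scope default route must resolve
 * through a prefix of narrower scope (here the link-scope connected route),
 * which is exactly the recursion fib_check_nh_v4_gw() below enforces.
 */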
static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
			      u8 scope, struct netlink_ext_ack *extack)
{
	struct net_device *dev;
	struct fib_result res;
	int err = 0;

	if (nh->fib_nh_flags & RTNH_F_ONLINK) {
		unsigned int addr_type;

		if (scope >= RT_SCOPE_LINK) {
			NL_SET_ERR_MSG(extack, "Nexthop has invalid scope");
			return -EINVAL;
		}
		dev = __dev_get_by_index(net, nh->fib_nh_oif);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
			return -ENODEV;
		}
		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			return -ENETDOWN;
		}
		addr_type = inet_addr_type_dev_table(net, dev, nh->fib_nh_gw4);
		if (addr_type != RTN_UNICAST) {
			NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
			return -EINVAL;
		}
		if (!netif_carrier_ok(dev))
			nh->fib_nh_flags |= RTNH_F_LINKDOWN;
		nh->fib_nh_dev = dev;
		dev_hold(dev);
		nh->fib_nh_scope = RT_SCOPE_LINK;
		return 0;
	}

	rcu_read_lock();
	{
		struct fib_table *tbl = NULL;
		struct flowi4 fl4 = {
			.daddr = nh->fib_nh_gw4,
			.flowi4_scope = scope + 1,
			.flowi4_oif = nh->fib_nh_oif,
			.flowi4_iif = LOOPBACK_IFINDEX,
		};

		/* It is not necessary, but requires a bit of thinking */
		if (fl4.flowi4_scope < RT_SCOPE_LINK)
			fl4.flowi4_scope = RT_SCOPE_LINK;

		if (table && table != RT_TABLE_MAIN)
			tbl = fib_get_table(net, table);

		if (tbl)
			err = fib_table_lookup(tbl, &fl4, &res,
					       FIB_LOOKUP_IGNORE_LINKSTATE |
					       FIB_LOOKUP_NOREF);

		/* on error or if no table given do full lookup. This
		 * is needed for example when nexthops are in the local
		 * table rather than the given table
		 */
		if (!tbl || err) {
			err = fib_lookup(net, &fl4, &res,
					 FIB_LOOKUP_IGNORE_LINKSTATE);
		}

		if (err) {
			NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
			goto out;
		}
	}

	err = -EINVAL;
	if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
		NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
		goto out;
	}
	nh->fib_nh_scope = res.scope;
	nh->fib_nh_oif = FIB_RES_OIF(res);
	nh->fib_nh_dev = dev = FIB_RES_DEV(res);
	if (!dev) {
		NL_SET_ERR_MSG(extack,
			       "No egress device for nexthop gateway");
		goto out;
	}
	dev_hold(dev);
	if (!netif_carrier_ok(dev))
		nh->fib_nh_flags |= RTNH_F_LINKDOWN;
	err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
out:
	rcu_read_unlock();
	return err;
}
static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
			      struct netlink_ext_ack *extack)
{
	struct in_device *in_dev;
	int err;

	if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");
		return -EINVAL;
	}

	rcu_read_lock();

	err = -ENODEV;
	in_dev = inetdev_by_index(net, nh->fib_nh_oif);
	if (!in_dev)
		goto out;
	err = -ENETDOWN;
	if (!(in_dev->dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
		goto out;
	}

	nh->fib_nh_dev = in_dev->dev;
	dev_hold(nh->fib_nh_dev);
	nh->fib_nh_scope = RT_SCOPE_HOST;
	if (!netif_carrier_ok(nh->fib_nh_dev))
		nh->fib_nh_flags |= RTNH_F_LINKDOWN;
	err = 0;
out:
	rcu_read_unlock();
	return err;
}
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
		 struct netlink_ext_ack *extack)
{
	int err;

	if (nh->fib_nh_gw_family == AF_INET)
		err = fib_check_nh_v4_gw(net, nh, table, scope, extack);
	else if (nh->fib_nh_gw_family == AF_INET6)
		err = fib_check_nh_v6_gw(net, nh, table, extack);
	else
		err = fib_check_nh_nongw(net, nh, extack);

	return err;
}
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}
static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}
static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}
__be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
				 unsigned char scope)
{
	struct fib_nh *nh;

	if (nhc->nhc_family != AF_INET)
		return inet_select_addr(nhc->nhc_dev, 0, scope);

	nh = container_of(nhc, struct fib_nh, nh_common);
	nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
{
	struct fib_nh_common *nhc = res->nhc;

	if (res->fi->fib_prefsrc)
		return res->fi->fib_prefsrc;

	if (nhc->nhc_family == AF_INET) {
		struct fib_nh *nh;

		nh = container_of(nhc, struct fib_nh, nh_common);
		if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
			return nh->nh_saddr;
	}

	return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
}
static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
	    fib_prefsrc != cfg->fc_dst) {
		u32 tb_id = cfg->fc_table;
		int rc;

		if (tb_id == RT_TABLE_MAIN)
			tb_id = RT_TABLE_LOCAL;

		rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
					  fib_prefsrc, tb_id);

		if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
			rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
						  fib_prefsrc, RT_TABLE_LOCAL);
		}

		if (rc != RTN_LOCAL)
			return false;
	}
	return true;
}
*fib_create_info(struct fib_config
*cfg
,
1347 struct netlink_ext_ack
*extack
)
1350 struct fib_info
*fi
= NULL
;
1351 struct nexthop
*nh
= NULL
;
1352 struct fib_info
*ofi
;
1354 struct net
*net
= cfg
->fc_nlinfo
.nl_net
;
1356 if (cfg
->fc_type
> RTN_MAX
)
1359 /* Fast check to catch the most weird cases */
1360 if (fib_props
[cfg
->fc_type
].scope
> cfg
->fc_scope
) {
1361 NL_SET_ERR_MSG(extack
, "Invalid scope");
1365 if (cfg
->fc_flags
& (RTNH_F_DEAD
| RTNH_F_LINKDOWN
)) {
1366 NL_SET_ERR_MSG(extack
,
1367 "Invalid rtm_flags - can not contain DEAD or LINKDOWN");
1371 if (cfg
->fc_nh_id
) {
1373 fi
= fib_find_info_nh(net
, cfg
);
1380 nh
= nexthop_find_by_id(net
, cfg
->fc_nh_id
);
1382 NL_SET_ERR_MSG(extack
, "Nexthop id does not exist");
1388 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1390 nhs
= fib_count_nexthops(cfg
->fc_mp
, cfg
->fc_mp_len
, extack
);
1397 if (fib_info_cnt
>= fib_info_hash_size
) {
1398 unsigned int new_size
= fib_info_hash_size
<< 1;
1399 struct hlist_head
*new_info_hash
;
1400 struct hlist_head
*new_laddrhash
;
1405 bytes
= new_size
* sizeof(struct hlist_head
*);
1406 new_info_hash
= fib_info_hash_alloc(bytes
);
1407 new_laddrhash
= fib_info_hash_alloc(bytes
);
1408 if (!new_info_hash
|| !new_laddrhash
) {
1409 fib_info_hash_free(new_info_hash
, bytes
);
1410 fib_info_hash_free(new_laddrhash
, bytes
);
1412 fib_info_hash_move(new_info_hash
, new_laddrhash
, new_size
);
1414 if (!fib_info_hash_size
)
1418 fi
= kzalloc(struct_size(fi
, fib_nh
, nhs
), GFP_KERNEL
);
1421 fi
->fib_metrics
= ip_fib_metrics_init(fi
->fib_net
, cfg
->fc_mx
,
1422 cfg
->fc_mx_len
, extack
);
1423 if (IS_ERR(fi
->fib_metrics
)) {
1424 err
= PTR_ERR(fi
->fib_metrics
);
1426 return ERR_PTR(err
);
1431 fi
->fib_protocol
= cfg
->fc_protocol
;
1432 fi
->fib_scope
= cfg
->fc_scope
;
1433 fi
->fib_flags
= cfg
->fc_flags
;
1434 fi
->fib_priority
= cfg
->fc_priority
;
1435 fi
->fib_prefsrc
= cfg
->fc_prefsrc
;
1436 fi
->fib_type
= cfg
->fc_type
;
1437 fi
->fib_tb_id
= cfg
->fc_table
;
1441 if (!nexthop_get(nh
)) {
1442 NL_SET_ERR_MSG(extack
, "Nexthop has been deleted");
1449 change_nexthops(fi
) {
1450 nexthop_nh
->nh_parent
= fi
;
1451 } endfor_nexthops(fi
)
1454 err
= fib_get_nhs(fi
, cfg
->fc_mp
, cfg
->fc_mp_len
, cfg
,
1457 err
= fib_nh_init(net
, fi
->fib_nh
, cfg
, 1, extack
);
1463 if (fib_props
[cfg
->fc_type
].error
) {
1464 if (cfg
->fc_gw_family
|| cfg
->fc_oif
|| cfg
->fc_mp
) {
1465 NL_SET_ERR_MSG(extack
,
1466 "Gateway, device and multipath can not be specified for this route type");
1471 switch (cfg
->fc_type
) {
1479 NL_SET_ERR_MSG(extack
, "Invalid route type");
1484 if (cfg
->fc_scope
> RT_SCOPE_HOST
) {
1485 NL_SET_ERR_MSG(extack
, "Invalid scope");
1490 err
= fib_check_nexthop(fi
->nh
, cfg
->fc_scope
, extack
);
1493 } else if (cfg
->fc_scope
== RT_SCOPE_HOST
) {
1494 struct fib_nh
*nh
= fi
->fib_nh
;
1496 /* Local address is added. */
1498 NL_SET_ERR_MSG(extack
,
1499 "Route with host scope can not have multiple nexthops");
1502 if (nh
->fib_nh_gw_family
) {
1503 NL_SET_ERR_MSG(extack
,
1504 "Route with host scope can not have a gateway");
1507 nh
->fib_nh_scope
= RT_SCOPE_NOWHERE
;
1508 nh
->fib_nh_dev
= dev_get_by_index(net
, nh
->fib_nh_oif
);
1510 if (!nh
->fib_nh_dev
)
1515 change_nexthops(fi
) {
1516 err
= fib_check_nh(cfg
->fc_nlinfo
.nl_net
, nexthop_nh
,
1517 cfg
->fc_table
, cfg
->fc_scope
,
1521 if (nexthop_nh
->fib_nh_flags
& RTNH_F_LINKDOWN
)
1523 } endfor_nexthops(fi
)
1524 if (linkdown
== fi
->fib_nhs
)
1525 fi
->fib_flags
|= RTNH_F_LINKDOWN
;
1528 if (fi
->fib_prefsrc
&& !fib_valid_prefsrc(cfg
, fi
->fib_prefsrc
)) {
1529 NL_SET_ERR_MSG(extack
, "Invalid prefsrc address");
1534 change_nexthops(fi
) {
1535 fib_info_update_nhc_saddr(net
, &nexthop_nh
->nh_common
,
1537 if (nexthop_nh
->fib_nh_gw_family
== AF_INET6
)
1538 fi
->fib_nh_is_v6
= true;
1539 } endfor_nexthops(fi
)
1545 ofi
= fib_find_info(fi
);
1554 refcount_set(&fi
->fib_clntref
, 1);
1555 spin_lock_bh(&fib_info_lock
);
1556 hlist_add_head(&fi
->fib_hash
,
1557 &fib_info_hash
[fib_info_hashfn(fi
)]);
1558 if (fi
->fib_prefsrc
) {
1559 struct hlist_head
*head
;
1561 head
= &fib_info_laddrhash
[fib_laddr_hashfn(fi
->fib_prefsrc
)];
1562 hlist_add_head(&fi
->fib_lhash
, head
);
1565 list_add(&fi
->nh_list
, &nh
->fi_list
);
1567 change_nexthops(fi
) {
1568 struct hlist_head
*head
;
1571 if (!nexthop_nh
->fib_nh_dev
)
1573 hash
= fib_devindex_hashfn(nexthop_nh
->fib_nh_dev
->ifindex
);
1574 head
= &fib_info_devhash
[hash
];
1575 hlist_add_head(&nexthop_nh
->nh_hash
, head
);
1576 } endfor_nexthops(fi
)
1578 spin_unlock_bh(&fib_info_lock
);
1590 return ERR_PTR(err
);
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
		     u8 rt_family, unsigned char *flags, bool skip_oif)
{
	if (nhc->nhc_flags & RTNH_F_DEAD)
		*flags |= RTNH_F_DEAD;

	if (nhc->nhc_flags & RTNH_F_LINKDOWN) {
		*flags |= RTNH_F_LINKDOWN;

		rcu_read_lock();
		switch (nhc->nhc_family) {
		case AF_INET:
			if (ip_ignore_linkdown(nhc->nhc_dev))
				*flags |= RTNH_F_DEAD;
			break;
		case AF_INET6:
			if (ip6_ignore_linkdown(nhc->nhc_dev))
				*flags |= RTNH_F_DEAD;
			break;
		}
		rcu_read_unlock();
	}

	switch (nhc->nhc_gw_family) {
	case AF_INET:
		if (nla_put_in_addr(skb, RTA_GATEWAY, nhc->nhc_gw.ipv4))
			goto nla_put_failure;
		break;
	case AF_INET6:
		/* if gateway family does not match nexthop family
		 * gateway is encoded as RTA_VIA
		 */
		if (rt_family != nhc->nhc_gw_family) {
			int alen = sizeof(struct in6_addr);
			struct nlattr *nla;
			struct rtvia *via;

			nla = nla_reserve(skb, RTA_VIA, alen + 2);
			if (!nla)
				goto nla_put_failure;

			via = nla_data(nla);
			via->rtvia_family = AF_INET6;
			memcpy(via->rtvia_addr, &nhc->nhc_gw.ipv6, alen);
		} else if (nla_put_in6_addr(skb, RTA_GATEWAY,
					    &nhc->nhc_gw.ipv6) < 0) {
			goto nla_put_failure;
		}
		break;
	}

	*flags |= (nhc->nhc_flags &
		   (RTNH_F_ONLINK | RTNH_F_OFFLOAD | RTNH_F_TRAP));

	if (!skip_oif && nhc->nhc_dev &&
	    nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
		goto nla_put_failure;

	if (nhc->nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
				RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(fib_nexthop_info);
#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
		    int nh_weight, u8 rt_family)
{
	const struct net_device *dev = nhc->nhc_dev;
	struct rtnexthop *rtnh;
	unsigned char flags = 0;

	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
	if (!rtnh)
		goto nla_put_failure;

	rtnh->rtnh_hops = nh_weight - 1;
	rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;

	if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0)
		goto nla_put_failure;

	rtnh->rtnh_flags = flags;

	/* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(fib_add_nexthop);
#endif

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
{
	struct nlattr *mp;

	mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
	if (!mp)
		goto nla_put_failure;

	if (unlikely(fi->nh)) {
		if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
			goto nla_put_failure;
		goto mp_end;
	}

	for_nexthops(fi) {
		if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
				    AF_INET) < 0)
			goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nh->nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
			goto nla_put_failure;
#endif
	} endfor_nexthops(fi);

mp_end:
	nla_nest_end(skb, mp);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
{
	return 0;
}
#endif
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  struct fib_rt_info *fri, unsigned int flags)
{
	unsigned int nhs = fib_info_num_path(fri->fi);
	struct fib_info *fi = fri->fi;
	u32 tb_id = fri->tb_id;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = fri->dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = fri->tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = fri->type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_in_addr(skb, RTA_DST, fri->dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;

	if (fi->nh) {
		if (nla_put_u32(skb, RTA_NH_ID, fi->nh->id))
			goto nla_put_failure;
		if (nexthop_is_blackhole(fi->nh))
			rtm->rtm_type = RTN_BLACKHOLE;
		if (!fi->fib_net->ipv4.sysctl_nexthop_compat_mode)
			goto offload;
	}

	if (nhs == 1) {
		const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
		unsigned char flags = 0;

		if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0)
			goto nla_put_failure;

		rtm->rtm_flags = flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
		}
#endif
	} else {
		if (fib_add_multipath(skb, fi) < 0)
			goto nla_put_failure;
	}

offload:
	if (fri->offload)
		rtm->rtm_flags |= RTM_F_OFFLOAD;
	if (fri->trap)
		rtm->rtm_flags |= RTM_F_TRAP;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/*
 * Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
	struct net *net = dev_net(dev);
	struct fib_info *fi;

	if (!fib_info_laddrhash || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net) ||
		    fi->fib_tb_id != tb_id)
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
static int call_fib_nh_notifiers(struct fib_nh *nh,
				 enum fib_event_type event_type)
{
	bool ignore_link_down = ip_ignore_linkdown(nh->fib_nh_dev);
	struct fib_nh_notifier_info info = {
		.fib_nh = nh,
	};

	switch (event_type) {
	case FIB_EVENT_NH_ADD:
		if (nh->fib_nh_flags & RTNH_F_DEAD)
			break;
		if (ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN)
			break;
		return call_fib4_notifiers(dev_net(nh->fib_nh_dev), event_type,
					   &info.info);
	case FIB_EVENT_NH_DEL:
		if ((ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) ||
		    (nh->fib_nh_flags & RTNH_F_DEAD))
			return call_fib4_notifiers(dev_net(nh->fib_nh_dev),
						   event_type, &info.info);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/* Update the PMTU of exceptions when:
 * - the new MTU of the first hop becomes smaller than the PMTU
 * - the old MTU was the same as the PMTU, and it limited discovery of
 *   larger MTUs on the path. With that limit raised, we can now
 *   discover larger MTUs
 * A special case is locked exceptions, for which the PMTU is smaller
 * than the minimal accepted PMTU:
 * - if the new MTU is greater than the PMTU, don't make any change
 * - otherwise, unlock and set PMTU
 */
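/* Worked example (illustrative only): an exception created with PMTU 1400
 * while the first hop MTU was 1500. If the device MTU drops to 1300,
 * new < fnhe_pmtu and the exception is clamped to 1300. If instead the MTU
 * was 1400 (orig == fnhe_pmtu) and is raised to 9000, the exception is set
 * to 9000 so that path MTU discovery can probe for larger MTUs again.
 */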
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig)
{
	struct fnhe_hash_bucket *bucket;
	int i;

	bucket = rcu_dereference_protected(nhc->nhc_exceptions, 1);
	if (!bucket)
		return;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
		     fnhe;
		     fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
			if (fnhe->fnhe_mtu_locked) {
				if (new <= fnhe->fnhe_pmtu) {
					fnhe->fnhe_pmtu = new;
					fnhe->fnhe_mtu_locked = false;
				}
			} else if (new < fnhe->fnhe_pmtu ||
				   orig == fnhe->fnhe_pmtu) {
				fnhe->fnhe_pmtu = new;
			}
		}
	}
}
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->fib_nh_dev == dev)
			fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu);
	}
}
/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 *
 * only used when fib_nh is built into fib_info
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->fib_nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->fib_nh_dev == dev &&
				 nexthop_nh->fib_nh_scope != scope) {
				switch (event) {
				case NETDEV_DOWN:
				case NETDEV_UNREGISTER:
					nexthop_nh->fib_nh_flags |= RTNH_F_DEAD;
					fallthrough;
				case NETDEV_CHANGE:
					nexthop_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
					break;
				}
				call_fib_nh_notifiers(nexthop_nh,
						      FIB_EVENT_NH_DEL);
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (event == NETDEV_UNREGISTER &&
			    nexthop_nh->fib_nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				fi->fib_flags |= RTNH_F_DEAD;
				fallthrough;
			case NETDEV_CHANGE:
				fi->fib_flags |= RTNH_F_LINKDOWN;
				break;
			}
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}
/* Must be invoked inside of an RCU protected region.  */
static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct hlist_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	u8 slen = 32 - res->prefixlen;
	int order = -1, last_idx = -1;
	struct fib_alias *fa, *fa1 = NULL;
	u32 last_prio = res->fi->fib_priority;
	u8 last_tos = 0;

	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;
		struct fib_nh_common *nhc;

		if (fa->fa_slen != slen)
			continue;
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fa->tb_id != tb->tb_id)
			continue;
		if (next_fi->fib_priority > last_prio &&
		    fa->fa_tos == last_tos) {
			if (last_tos)
				continue;
			break;
		}
		if (next_fi->fib_flags & RTNH_F_DEAD)
			continue;
		last_tos = fa->fa_tos;
		last_prio = next_fi->fib_priority;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		nhc = fib_info_nhc(next_fi, 0);
		if (!nhc->nhc_gw_family || nhc->nhc_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
			fa1 = fa;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, fa1->fa_default)) {
			fib_result_assign(res, fi);
			fa1->fa_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || !fi) {
		if (fa1)
			fa1->fa_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      fa1->fa_default)) {
		fib_result_assign(res, fi);
		fa1->fa_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	fa1->fa_default = last_idx;
out:
	return;
}
/*
 * Dead device goes up. We wake up dead nexthops.
 * This only makes sense for multipath routes.
 *
 * only used when fib_nh is built into fib_info
 */
int fib_sync_up(struct net_device *dev, unsigned char nh_flags)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	if (nh_flags & RTNH_F_DEAD) {
		unsigned int flags = dev_get_flags(dev);

		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			nh_flags |= RTNH_F_LINKDOWN;
	}

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->fib_nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->fib_nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (!nexthop_nh->fib_nh_dev ||
			    !(nexthop_nh->fib_nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->fib_nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			nexthop_nh->fib_nh_flags &= ~nh_flags;
			call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~nh_flags;
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static bool fib_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;

	if (nh->fib_nh_scope == RT_SCOPE_LINK) {
		struct neighbour *n;

		rcu_read_lock_bh();

		if (likely(nh->fib_nh_gw_family == AF_INET))
			n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
						      (__force u32)nh->fib_nh_gw4);
		else if (nh->fib_nh_gw_family == AF_INET6)
			n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev,
							   &nh->fib_nh_gw6);
		else
			n = NULL;
		if (n)
			state = n->nud_state;

		rcu_read_unlock_bh();
	}

	return !!(state & NUD_VALID);
}
void fib_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;
	struct net *net = fi->fib_net;
	bool first = false;

	if (unlikely(res->fi->nh)) {
		nexthop_path_fib_result(res, hash);
		return;
	}

	change_nexthops(fi) {
		if (net->ipv4.sysctl_fib_multipath_use_neigh) {
			if (!fib_good_nh(nexthop_nh))
				continue;
			if (!first) {
				res->nh_sel = nhsel;
				res->nhc = &nexthop_nh->nh_common;
				first = true;
			}
		}

		if (hash > atomic_read(&nexthop_nh->fib_nh_upper_bound))
			continue;

		res->nh_sel = nhsel;
		res->nhc = &nexthop_nh->nh_common;
		return;
	} endfor_nexthops(fi);
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, const struct sk_buff *skb)
{
	if (fl4->flowi4_oif && !(fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF))
		goto check_saddr;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(net, fl4, skb, NULL);

		fib_select_multipath(res, h);
	}
	else
#endif
	if (!res->prefixlen &&
	    res->table->tb_num_default > 1 &&
	    res->type == RTN_UNICAST)
		fib_select_default(fl4, res);

check_saddr:
	if (!fl4->saddr)
		fl4->saddr = fib_result_prefsrc(net, res);
}