/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>

#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

u32 fib_multipath_secret __read_mostly;
#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0,	nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize it to get rid of the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
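
/* Editor's usage sketch (hypothetical helper, not part of the original
 * file): the iterator macros above open a block that endfor_nexthops()
 * closes, so they compose like an ordinary loop:
 */
static inline int example_count_dead_nh(const struct fib_info *fi)
{
	int dead = 0;

	for_nexthops(fi) {
		if (nh->nh_flags & RTNH_F_DEAD)
			dead++;
	} endfor_nexthops(fi);

	return dead;
}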
const struct fib_prop fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};
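
/* Editor's sketch (hypothetical helper, not in the original file): fib_props
 * maps a route type to the error and minimal scope it prescribes, e.g.
 * RTN_UNREACHABLE yields -EHOSTUNREACH while RTN_UNICAST yields 0:
 */
static inline int example_fib_type_error(u8 type)
{
	return (type <= RTN_MAX) ? fib_props[type].error : -EINVAL;
}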
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	if (!rt)
		return;

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */

	dst_free(&rt->dst);
}
static void free_nh_exceptions(struct fib_nh *nh)
{
	struct fnhe_hash_bucket *hash;
	int i;

	hash = rcu_dereference_protected(nh->nh_exceptions, 1);
	if (!hash)
		return;
	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
		while (fnhe) {
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);

			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);

			kfree(fnhe);

			fnhe = next;
		}
	}
	kfree(hash);
}
static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
	int cpu;

	if (!rtp)
		return;

	for_each_possible_cpu(cpu) {
		struct rtable *rt;

		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		if (rt)
			dst_free(&rt->dst);
	}
	free_percpu(rtp);
}
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);
	struct dst_metrics *m;

	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		lwtstate_put(nexthop_nh->nh_lwtstate);
		free_nh_exceptions(nexthop_nh);
		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
	} endfor_nexthops(fi);

	m = fi->fib_metrics;
	if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
		kfree(m);
	kfree(fi);
}
void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
		return;
	}
	fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
	change_nexthops(fi) {
		if (nexthop_nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users--;
	} endfor_nexthops(fi);
#endif
	call_rcu(&fi->rcu, free_fib_info_rcu);
}
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw  != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}
/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4) /* RTA_PREFSRC */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		size_t nh_encapsize = 0;
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* grab encap info */
		for_nexthops(fi) {
			if (nh->nh_lwtstate) {
				/* RTA_ENCAP_TYPE */
				nh_encapsize += lwtunnel_get_encap_size(
						nh->nh_lwtstate);
				/* RTA_ENCAP */
				nh_encapsize += nla_total_size(2);
			}
		} endfor_nexthops(fi);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size((fi->fib_nhs * nhsize) +
					  nh_encapsize);
	}

	return payload;
}
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
			    int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	} else {
		return 0;
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	int ret;

	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
			if (nexthop_nh->nh_tclassid)
				fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
			nla = nla_find(attrs, attrlen, RTA_ENCAP);
			if (nla) {
				struct lwtunnel_state *lwtstate;
				struct net_device *dev = NULL;
				struct nlattr *nla_entype;

				nla_entype = nla_find(attrs, attrlen,
						      RTA_ENCAP_TYPE);
				if (!nla_entype)
					goto err_inval;
				if (cfg->fc_oif)
					dev = __dev_get_by_index(net, cfg->fc_oif);
				ret = lwtunnel_build_state(dev, nla_get_u16(
							   nla_entype),
							   nla, AF_INET, cfg,
							   &lwtstate);
				if (ret)
					goto errout;
				nexthop_nh->nh_lwtstate =
					lwtstate_get(lwtstate);
			}
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;

err_inval:
	ret = -EINVAL;

errout:
	return ret;
}
static void fib_rebalance(struct fib_info *fi)
{
	int total;
	int w;
	struct in_device *in_dev;

	if (fi->fib_nhs < 2)
		return;

	total = 0;
	for_nexthops(fi) {
		if (nh->nh_flags & RTNH_F_DEAD)
			continue;

		in_dev = __in_dev_get_rtnl(nh->nh_dev);

		if (in_dev &&
		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
		    nh->nh_flags & RTNH_F_LINKDOWN)
			continue;

		total += nh->nh_weight;
	} endfor_nexthops(fi);

	w = 0;
	change_nexthops(fi) {
		int upper_bound;

		in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);

		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
			upper_bound = -1;
		} else if (in_dev &&
			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
			upper_bound = -1;
		} else {
			w += nexthop_nh->nh_weight;
			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
							    total) - 1;
		}

		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
	} endfor_nexthops(fi);

	net_get_random_once(&fib_multipath_secret,
			    sizeof(fib_multipath_secret));
}
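
/* Worked example (editor's illustration, not from the original file): with
 * two live nexthops of weight 1 and 2, total = 3 and the cumulative upper
 * bounds computed above are
 *
 *	nh0: DIV_ROUND_CLOSEST_ULL(1ULL << 31, 3) - 1 = 715827882
 *	nh1: DIV_ROUND_CLOSEST_ULL(3ULL << 31, 3) - 1 = 2147483647
 *
 * so a 31-bit flow hash selects nh0 with probability ~1/3 and nh1 with
 * probability ~2/3, while dead/linkdown nexthops get bound -1 and can
 * never match the range check in fib_select_multipath().
 */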
static inline void fib_add_weight(struct fib_info *fi,
				  const struct fib_nh *nh)
{
	fi->fib_weight += nh->nh_weight;
}

#else /* CONFIG_IP_ROUTE_MULTIPATH */

#define fib_rebalance(fi) do { } while (0)
#define fib_add_weight(fi, nh) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int fib_encap_match(struct net *net, u16 encap_type,
			   struct nlattr *encap,
			   int oif, const struct fib_nh *nh,
			   const struct fib_config *cfg)
{
	struct lwtunnel_state *lwtstate;
	struct net_device *dev = NULL;
	int ret, result = 0;

	if (encap_type == LWTUNNEL_ENCAP_NONE)
		return 0;

	if (oif)
		dev = __dev_get_by_index(net, oif);
	ret = lwtunnel_build_state(dev, encap_type, encap,
				   AF_INET, cfg, &lwtstate);
	if (!ret) {
		result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
		lwtstate_free(lwtstate);
	}

	return result;
}
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if (cfg->fc_encap) {
			if (fib_encap_match(net, cfg->fc_encap_type,
					    cfg->fc_encap, cfg->fc_oif,
					    fi->fib_nh, cfg))
				return 1;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow &&
		    cfg->fc_flow != fi->fib_nh->nh_tclassid)
			return 1;
#endif
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (!cfg->fc_mp)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_in_addr(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif

	return 0;
}
/*
 * Picture
 * -------
 *
 * The semantics of nexthops are very messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr, but also by a direct route.
 * c) if both gateway and interface are specified, they should not
 *    contradict.
 * d) if we use tunnel routes, the gateway could be off-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size of the code does
 * not increase practically, but it becomes much more general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or if the "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g., as a by-product it allows
 * independent exterior and interior routing processes to coexist
 * in peace.
 *
 * Normally it looks like the following.
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *			  |
 *			  |-> {link prefix} -> (gw, oif) [scope local]
 *						|
 *						|-> {local prefix} (terminal node)
 */
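
/* Editor's note (illustrative, not from the original file): in iproute2
 * terms, the ONLINK escape hatch described above corresponds to e.g.
 *
 *	ip route add 10.0.0.0/8 via 192.168.7.1 dev eth0 onlink
 *
 * where the gateway is treated as reachable on eth0 even when no covering
 * link-scope route exists; fib_check_nh() below implements this scope
 * recursion together with the RTNH_F_ONLINK short-circuit.
 */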
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err = 0;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {
			unsigned int addr_type;

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
			if (addr_type != RTN_UNICAST)
				return -EINVAL;
			if (!netif_carrier_ok(dev))
				nh->nh_flags |= RTNH_F_LINKDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct fib_table *tbl = NULL;
			struct flowi4 fl4 = {
				.daddr = nh->nh_gw,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;

			if (cfg->fc_table)
				tbl = fib_get_table(net, cfg->fc_table);

			if (tbl)
				err = fib_table_lookup(tbl, &fl4, &res,
						       FIB_LOOKUP_IGNORE_LINKSTATE |
						       FIB_LOOKUP_NOREF);

			/* on error or if no table given do full lookup. This
			 * is needed for example when nexthops are in the local
			 * table rather than the given table
			 */
			if (!tbl || err) {
				err = fib_lookup(net, &fl4, &res,
						 FIB_LOOKUP_IGNORE_LINKSTATE);
			}

			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		if (!netif_carrier_ok(dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (!in_dev)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		if (!netif_carrier_ok(nh->nh_dev))
			nh->nh_flags |= RTNH_F_LINKDOWN;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}
static struct hlist_head *fib_info_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}
static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}
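
/* Worked example (editor's illustration, assuming a 64-bit build with 4 KiB
 * pages): 512 buckets need 512 * sizeof(struct hlist_head *) = 4096 bytes,
 * which still takes the kzalloc()/kfree() path above; the next doubling to
 * 1024 buckets (8192 bytes) switches to __get_free_pages()/free_pages()
 * with get_order(8192) == 1.
 */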
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
}
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
	nh->nh_saddr = inet_select_addr(nh->nh_dev,
					nh->nh_gw,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;
}
static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
	    fib_prefsrc != cfg->fc_dst) {
		u32 tb_id = cfg->fc_table;
		int rc;

		if (tb_id == RT_TABLE_MAIN)
			tb_id = RT_TABLE_LOCAL;

		rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
					  fib_prefsrc, tb_id);
		if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
			rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
						  fib_prefsrc, RT_TABLE_LOCAL);
		}

		if (rc != RTN_LOCAL)
			return false;
	}
	return true;
}
static int
fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;

	if (!cfg->fc_mx)
		return 0;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return -EINVAL;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				return -EINVAL;
		} else {
			if (nla_len(nla) != sizeof(u32))
				return -EINVAL;
			val = nla_get_u32(nla);
		}
		if (type == RTAX_ADVMSS && val > 65535 - 40)
			val = 65535 - 40;
		if (type == RTAX_MTU && val > 65535 - 15)
			val = 65535 - 15;
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			return -EINVAL;
		fi->fib_metrics->metrics[type - 1] = val;
	}

	if (ecn_ca)
		fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;

	return 0;
}
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)
		goto err_inval;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

	if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 16;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
		} else
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (!fi)
		goto failure;
	fib_info_cnt++;
	if (cfg->fc_mx) {
		fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
		if (unlikely(!fi->fib_metrics)) {
			kfree(fi);
			return ERR_PTR(err);
		}
		atomic_set(&fi->fib_metrics->refcnt, 1);
	} else
		fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;

	fi->fib_net = net;
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;
	fi->fib_tb_id = cfg->fc_table;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
		nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
		if (!nexthop_nh->nh_pcpu_rth_output)
			goto failure;
	} endfor_nexthops(fi)

	err = fib_convert_metrics(fi, cfg);
	if (err)
		goto failure;

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		if (cfg->fc_encap) {
			struct lwtunnel_state *lwtstate;
			struct net_device *dev = NULL;

			if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
				goto err_inval;
			if (cfg->fc_oif)
				dev = __dev_get_by_index(net, cfg->fc_oif);
			err = lwtunnel_build_state(dev, cfg->fc_encap_type,
						   cfg->fc_encap, AF_INET, cfg,
						   &lwtstate);
			if (err)
				goto failure;

			nh->nh_lwtstate = lwtstate_get(lwtstate);
		}
		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
		nh->nh_tclassid = cfg->fc_flow;
		if (nh->nh_tclassid)
			fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	} else {
		switch (cfg->fc_type) {
		case RTN_UNICAST:
		case RTN_LOCAL:
		case RTN_BROADCAST:
		case RTN_ANYCAST:
		case RTN_MULTICAST:
			break;
		default:
			goto err_inval;
		}
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (!nh->nh_dev)
			goto failure;
	} else {
		int linkdown = 0;

		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
			if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
				linkdown++;
		} endfor_nexthops(fi)
		if (linkdown == fi->fib_nhs)
			fi->fib_flags |= RTNH_F_LINKDOWN;
	}

	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
		goto err_inval;

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
		fib_add_weight(fi, nexthop_nh);
	} endfor_nexthops(fi)

	fib_rebalance(fi);

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		struct in_device *in_dev;

		if (fi->fib_nh->nh_gw &&
		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
			goto nla_put_failure;
		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
			goto nla_put_failure;
		if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
			in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
			if (in_dev &&
			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
				rtm->rtm_flags |= RTNH_F_DEAD;
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;
#endif
		if (fi->fib_nh->nh_lwtstate &&
		    lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
			goto nla_put_failure;
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(fi) {
			struct in_device *in_dev;

			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				in_dev = __in_dev_get_rtnl(nh->nh_dev);
				if (in_dev &&
				    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
					rtnh->rtnh_flags |= RTNH_F_DEAD;
			}
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;
#endif
			if (nh->nh_lwtstate &&
			    lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Update FIB if:
 * - local address disappeared -> we must delete all the entries
 *   referring to it.
 * - device went down -> we must shutdown all nexthops going via it.
 */
int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct net *net = dev_net(dev);
	int tb_id = l3mdev_fib_table(dev);
	struct fib_info *fi;

	if (!fib_info_laddrhash || local == 0)
		return 0;

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net) ||
		    fi->fib_tb_id != tb_id)
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}
/* Update the PMTU of exceptions when:
 * - the new MTU of the first hop becomes smaller than the PMTU
 * - the old MTU was the same as the PMTU, and it limited discovery of
 *   larger MTUs on the path. With that limit raised, we can now
 *   discover larger MTUs
 * A special case is locked exceptions, for which the PMTU is smaller
 * than the minimal accepted PMTU:
 * - if the new MTU is greater than the PMTU, don't make any change
 * - otherwise, unlock and set PMTU
 */
static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
{
	struct fnhe_hash_bucket *bucket;
	int i;

	bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
	if (!bucket)
		return;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
		     fnhe;
		     fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
			if (fnhe->fnhe_mtu_locked) {
				if (new <= fnhe->fnhe_pmtu) {
					fnhe->fnhe_pmtu = new;
					fnhe->fnhe_mtu_locked = false;
				}
			} else if (new < fnhe->fnhe_pmtu ||
				   orig == fnhe->fnhe_pmtu) {
				fnhe->fnhe_pmtu = new;
			}
		}
	}
}
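
/* Worked example (editor's illustration) of the rules above: for an
 * exception whose fnhe_pmtu is 1500 because the first hop's MTU was 1500,
 * raising the device MTU to 9000 hits "orig == fnhe_pmtu" and lifts the
 * PMTU to 9000, re-enabling discovery of larger path MTUs; lowering the
 * MTU to 1400 hits "new < fnhe_pmtu" and clamps the PMTU to 1400. A locked
 * exception with fnhe_pmtu == 552 is left alone by new == 1400, but
 * new == 500 satisfies "new <= fnhe_pmtu", so it is unlocked and lowered
 * to 500.
 */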
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->nh_dev == dev)
			nh_update_mtu(nh, dev->mtu, orig_mtu);
	}
}
/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				switch (event) {
				case NETDEV_DOWN:
				case NETDEV_UNREGISTER:
					nexthop_nh->nh_flags |= RTNH_F_DEAD;
					/* fall through */
				case NETDEV_CHANGE:
					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
					break;
				}
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (event == NETDEV_UNREGISTER &&
			    nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				fi->fib_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				fi->fib_flags |= RTNH_F_LINKDOWN;
				break;
			}
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}
/* Must be invoked inside of an RCU protected region.  */
void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct hlist_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	u8 slen = 32 - res->prefixlen;
	int order = -1, last_idx = -1;
	struct fib_alias *fa, *fa1 = NULL;
	u32 last_prio = res->fi->fib_priority;
	u8 last_tos = 0;

	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_slen != slen)
			continue;
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
			continue;
		if (fa->tb_id != tb->tb_id)
			continue;
		if (next_fi->fib_priority > last_prio &&
		    fa->fa_tos == last_tos) {
			if (last_tos)
				continue;
			break;
		}
		if (next_fi->fib_flags & RTNH_F_DEAD)
			continue;
		last_tos = fa->fa_tos;
		last_prio = next_fi->fib_priority;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fib_alias_accessed(fa);

		if (!fi) {
			if (next_fi != res->fi)
				break;
			fa1 = fa;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, fa1->fa_default)) {
			fib_result_assign(res, fi);
			fa1->fa_default = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}

	if (order <= 0 || !fi) {
		if (fa1)
			fa1->fa_default = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
			      fa1->fa_default)) {
		fib_result_assign(res, fi);
		fa1->fa_default = order;
		goto out;
	}

	if (last_idx >= 0)
		fib_result_assign(res, last_resort);
	fa1->fa_default = last_idx;
out:
	return;
}
/*
 * Dead device goes up. We wake up dead nexthops.
 * It makes sense only on multipath routes.
 */
int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	if (nh_flags & RTNH_F_DEAD) {
		unsigned int flags = dev_get_flags(dev);

		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			nh_flags |= RTNH_F_LINKDOWN;
	}

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (!nexthop_nh->nh_dev ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			nexthop_nh->nh_flags &= ~nh_flags;
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~nh_flags;
			ret++;
		}

		fib_rebalance(fi);
	}

	return ret;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static bool fib_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;

	if (nh->nh_scope == RT_SCOPE_LINK) {
		struct neighbour *n;

		rcu_read_lock_bh();

		n = __ipv4_neigh_lookup_noref(nh->nh_dev,
					      (__force u32)nh->nh_gw);
		if (n)
			state = n->nud_state;

		rcu_read_unlock_bh();
	}

	return !!(state & NUD_VALID);
}
void fib_select_multipath(struct fib_result *res, int hash)
{
	struct fib_info *fi = res->fi;
	struct net *net = fi->fib_net;
	bool first = false;

	for_nexthops(fi) {
		if (net->ipv4.sysctl_fib_multipath_use_neigh) {
			if (!fib_good_nh(nh))
				continue;
			if (!first) {
				res->nh_sel = nhsel;
				first = true;
			}
		}

		if (hash > atomic_read(&nh->nh_upper_bound))
			continue;

		res->nh_sel = nhsel;
		return;
	} endfor_nexthops(fi);
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
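
/* Editor's note (illustrative): callers pass a 31-bit flow hash, e.g.
 * fib_select_path() below derives one as get_hash_from_flowi4(fl4) >> 1,
 * so the comparison against nh_upper_bound (precomputed by fib_rebalance())
 * reduces to a single cumulative range check per nexthop.
 */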
void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, int mp_hash)
{
	bool oif_check;

	oif_check = (fl4->flowi4_oif == 0 ||
		     fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi->fib_nhs > 1 && oif_check) {
		if (mp_hash < 0)
			mp_hash = get_hash_from_flowi4(fl4) >> 1;

		fib_select_multipath(res, mp_hash);
	}
	else
#endif
	if (!res->prefixlen &&
	    res->table->tb_num_default > 1 &&
	    res->type == RTN_UNICAST && oif_check)
		fib_select_default(fl4, res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, *res);
}
EXPORT_SYMBOL_GPL(fib_select_path);