/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>	/* needed for arp_tbl in fib_detect_death() */
#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc optimizes away this dummy one-pass loop. */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }

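/*
 * Usage sketch (illustrative, not part of the original file): the
 * iterator macros open a scope that endfor_nexthops() closes, so
 * callers bracket the loop body like this:
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_flags & RTNH_F_DEAD)
 *			continue;
 *		... inspect nh / nhsel here ...
 *	} endfor_nexthops(fi)
 *
 * change_nexthops() is the mutable variant (nexthop_nh); both collapse
 * to a single-pass loop when CONFIG_IP_ROUTE_MULTIPATH is off.
 */
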
/* Per route-type semantics: the error returned for "rejecting" route
 * types, and the widest scope a route of that type may have. */
static const struct
{
	int	error;
	u8	scope;
} fib_props[RTN_MAX + 1] = {
	[RTN_UNSPEC] = {
		.error	= 0,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_UNICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_LOCAL] = {
		.error	= 0,
		.scope	= RT_SCOPE_HOST,
	},
	[RTN_BROADCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_ANYCAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_LINK,
	},
	[RTN_MULTICAST] = {
		.error	= 0,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_BLACKHOLE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_PROHIBIT] = {
		.error	= -EACCES,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_THROW] = {
		.error	= -EAGAIN,
		.scope	= RT_SCOPE_UNIVERSE,
	},
	[RTN_NAT] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
	[RTN_XRESOLVE] = {
		.error	= -EINVAL,
		.scope	= RT_SCOPE_NOWHERE,
	},
};

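/*
 * How the table is used (per the checks visible in this file):
 * fib_create_info() rejects a config whose scope is wider than
 * fib_props[type].scope, and fib_semantic_match() returns
 * fib_props[type].error directly for rejecting route types,
 * e.g. -EHOSTUNREACH for an RTN_UNREACHABLE route.
 */
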
/* Release a nexthop info record */

static void free_fib_info_rcu(struct rcu_head *head)
{
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
	if (fi->fib_dead == 0) {
		pr_warning("Freeing alive fib_info %p\n", fi);
		return;
	}
	change_nexthops(fi) {
		if (nexthop_nh->nh_dev)
			dev_put(nexthop_nh->nh_dev);
		nexthop_nh->nh_dev = NULL;
	} endfor_nexthops(fi);
	fib_info_cnt--;
	release_net(fi->fib_net);
	call_rcu(&fi->rcu, free_fib_info_rcu);
}

void fib_release_info(struct fib_info *fi)
{
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		if (fi->fib_prefsrc)
			hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->nh_dev)
				continue;
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
		fi->fib_dead = 1;
		fib_info_put(fi);
	}
	spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
	const struct fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw  != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_NET_CLS_ROUTE
		    nh->nh_tclassid != onh->nh_tclassid ||
#endif
		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

	return (val ^
		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
	unsigned int mask = (fib_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= fi->fib_protocol;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
	for_nexthops(fi) {
		val ^= fib_devindex_hashfn(nh->nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_info *fi;
	unsigned int hash;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, node, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
			continue;
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(fi->fib_metrics)) == 0 &&
		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
			return fi;
	}

	return NULL;
}

/* Check that the gateway is already configured.
 * Used only by the redirect acceptance routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	unsigned int hash;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, node, head, nh_hash) {
		if (nh->nh_dev == dev &&
		    nh->nh_gw == gw &&
		    !(nh->nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);
			return 0;
		}
	}

	spin_unlock(&fib_info_lock);

	return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4); /* RTA_PREFSRC */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain a flow and a gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}

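/*
 * Resulting message layout (sketch, matching the sizing above):
 *
 *	struct rtmsg
 *	RTA_TABLE, RTA_DST, RTA_PRIORITY, RTA_PREFSRC
 *	RTA_METRICS (nested, up to RTAX_MAX u32 metrics)
 *	RTA_MULTIPATH (nested)
 *		struct rtnexthop + RTA_GATEWAY/RTA_FLOW	(one per nexthop)
 *
 * fib_dump_info() below emits exactly this shape; the single-nexthop
 * case uses top-level RTA_GATEWAY/RTA_OIF instead of RTA_MULTIPATH.
 */
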
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, struct nl_info *info,
	       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
			    fa->fa_type, fa->fa_scope, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, info->nl_net, info->pid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

/* Return the first fib alias matching TOS with
 * priority less than or equal to PRIO.
 */
struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
{
	if (fah) {
		struct fib_alias *fa;
		list_for_each_entry(fa, fah, fa_list) {
			if (fa->fa_tos > tos)
				continue;
			if (fa->fa_info->fib_priority >= prio ||
			    fa->fa_tos < tos)
				return fa;
		}
	}
	return NULL;
}

int fib_detect_death(struct fib_info *fi, int order,
		     struct fib_info **last_resort, int *last_idx, int dflt)
{
	struct neighbour *n;
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
	if (n) {
		state = n->nud_state;
		neigh_release(n);
	}
	if (state == NUD_REACHABLE)
		return 0;
	if ((state & NUD_VALID) && order != dflt)
		return 0;
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt)) {
		*last_resort = fi;
		*last_idx = order;
	}
	return 1;
}

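/*
 * Reading the checks above: a default route whose gateway's ARP entry
 * is NUD_REACHABLE is treated as alive (return 0), and a merely
 * NUD_VALID entry keeps a non-current default alive as well. Otherwise
 * the route is reported dead (return 1) and may be remembered in
 * *last_resort/*last_idx as a fallback candidate for default-route
 * selection.
 */
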
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
	int nhs = 0;

	while (rtnh_ok(rtnh, remaining)) {
		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg)
{
	change_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		nexthop_nh->nh_flags =
			(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
		nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_NET_CLS_ROUTE
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	return 0;
}

#endif

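/*
 * RTA_MULTIPATH wire format parsed above (sketch): the attribute
 * payload is a sequence of variable-length records,
 *
 *	struct rtnexthop { u16 rtnh_len; u8 rtnh_flags, rtnh_hops;
 *			   int rtnh_ifindex; }
 *	followed by rtnh_len - sizeof(struct rtnexthop) bytes of
 *	nested attributes (RTA_GATEWAY, RTA_FLOW, ...).
 *
 * rtnh_ok()/rtnh_next() walk this sequence; note that rtnh_hops stores
 * weight - 1, which is why fib_get_nhs() adds 1 back.
 */
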
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;
	int remaining;
#endif

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
		return 1;

	if (cfg->fc_oif || cfg->fc_gw) {
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp == NULL)
		return 0;

	rtnh = cfg->fc_mp;
	remaining = cfg->fc_mp_len;

	for_nexthops(fi) {
		int attrlen;

		if (!rtnh_ok(rtnh, remaining))
			return -EINVAL;

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
			return 1;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_be32(nla) != nh->nh_gw)
				return 1;
#ifdef CONFIG_NET_CLS_ROUTE
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
				return 1;
#endif
		}

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
#endif

	return 0;
}

/*
 * Picture
 * -------
 *
 * The semantics of nexthops are messy for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is really direct.
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route.
 * c) if both a gateway and an interface are specified, they must not
 *    contradict each other.
 * d) with tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code barely
 * grows but the result is much more general.
 *
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or when "nexthop" is declared ONLINK,
 * which forces gw to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. As a by-product it even allows
 * independent exterior and interior routing processes to coexist.
 *
 * Normally it looks like this:
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *                        |
 *                        |-> {link prefix} -> (gw, oif) [scope local]
 *                                              |
 *                                              |-> {local prefix} (terminal node)
 */

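/*
 * Concrete example (illustrative, not from the original file):
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1	      [scope universe]
 *
 * is accepted only if something narrower already covers 192.168.1.1,
 * typically the link-scope route 192.168.1.0/24 dev eth0 created when
 * the interface address was configured; fib_check_nh() below performs
 * exactly this narrower-scope lookup.
 */
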
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
			struct fib_nh *nh)
{
	int err;
	struct net *net;
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->nh_gw) {
		struct fib_result res;

		if (nh->nh_flags & RTNH_F_ONLINK) {

			if (cfg->fc_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			dev = __dev_get_by_index(net, nh->nh_oif);
			if (!dev)
				return -ENODEV;
			if (!(dev->flags & IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}
		rcu_read_lock();
		{
			struct flowi fl = {
				.fl4_dst = nh->nh_gw,
				.fl4_scope = cfg->fc_scope + 1,
				.oif = nh->nh_oif,
			};

			/* It is not necessary, but requires a bit of thinking */
			if (fl.fl4_scope < RT_SCOPE_LINK)
				fl.fl4_scope = RT_SCOPE_LINK;
			err = fib_lookup(net, &fl, &res);
			if (err) {
				rcu_read_unlock();
				return err;
			}
		}
		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = FIB_RES_OIF(res);
		nh->nh_dev = dev = FIB_RES_DEV(res);
		if (!dev)
			goto out;
		dev_hold(dev);
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
	} else {
		struct in_device *in_dev;

		if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
			return -EINVAL;

		rcu_read_lock();
		err = -ENODEV;
		in_dev = inetdev_by_index(net, nh->nh_oif);
		if (in_dev == NULL)
			goto out;
		err = -ENETDOWN;
		if (!(in_dev->dev->flags & IFF_UP))
			goto out;
		nh->nh_dev = in_dev->dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
		err = 0;
	}
out:
	rcu_read_unlock();
	return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
	unsigned int mask = (fib_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_hash_alloc(int bytes)
{
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(bytes));
}

static void fib_hash_free(struct hlist_head *hash, int bytes)
{
	if (!hash)
		return;

	if (bytes <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long) hash, get_order(bytes));
}

static void fib_hash_move(struct hlist_head *new_info_hash,
			  struct hlist_head *new_laddrhash,
			  unsigned int new_size)
{
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			hlist_del(&fi->fib_hash);

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);
		}
	}
	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *node, *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			hlist_del(&fi->fib_lhash);

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);
		}
	}
	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_hash_free(old_info_hash, bytes);
	fib_hash_free(old_laddrhash, bytes);
}

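/*
 * Design note (summarizing the code above): both hash tables are
 * resized together under fib_info_lock, existing entries are rehashed
 * into the new bucket arrays in place, and only the old bucket arrays
 * are freed afterwards, outside the lock; the fib_info entries
 * themselves are never copied or freed by a resize.
 */
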
struct fib_info *fib_create_info(struct fib_config *cfg)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	int nhs = 1;
	struct net *net = cfg->fc_nlinfo.nl_net;

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (cfg->fc_mp) {
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
		if (nhs == 0)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	if (fib_info_cnt >= fib_hash_size) {
		unsigned int new_size = fib_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_hash_alloc(bytes);
		new_laddrhash = fib_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_hash_free(new_info_hash, bytes);
			fib_hash_free(new_laddrhash, bytes);
		} else
			fib_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_hash_size)
			goto failure;
	}

	fi = kzalloc(sizeof(*fi) + nhs * sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	fib_info_cnt++;

	fi->fib_net = hold_net(net);
	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;

	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)

	if (cfg->fc_mx) {
		struct nlattr *nla;
		int remaining;

		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
			int type = nla_type(nla);

			if (type) {
				if (type > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[type - 1] = nla_get_u32(nla);
			}
		}
	}

	if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
		if (err != 0)
			goto failure;
		if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
			goto err_inval;
		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
			goto err_inval;
#ifdef CONFIG_NET_CLS_ROUTE
		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
			goto err_inval;
#endif
#else
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;

		nh->nh_oif = cfg->fc_oif;
		nh->nh_gw = cfg->fc_gw;
		nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_NET_CLS_ROUTE
		nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
			goto err_inval;
		goto link_it;
	}

	if (cfg->fc_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			err = fib_check_nh(cfg, fi, nexthop_nh);
			if (err != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
		    fi->fib_prefsrc != cfg->fc_dst)
			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

link_it:
	ofi = fib_find_info(fi);
	if (ofi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nexthop_nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}

	return ERR_PTR(err);
}

/* Note! fib_semantic_match intentionally uses RCU list functions. */
int fib_semantic_match(struct list_head *head, const struct flowi *flp,
		       struct fib_result *res, int prefixlen, int fib_flags)
{
	struct fib_alias *fa;
	int nh_sel = 0;

	list_for_each_entry_rcu(fa, head, fa_list) {
		int err;

		if (fa->fa_tos &&
		    fa->fa_tos != flp->fl4_tos)
			continue;

		if (fa->fa_scope < flp->fl4_scope)
			continue;

		fib_alias_accessed(fa);

		err = fib_props[fa->fa_type].error;
		if (err == 0) {
			struct fib_info *fi = fa->fa_info;

			if (fi->fib_flags & RTNH_F_DEAD)
				continue;

			switch (fa->fa_type) {
			case RTN_UNICAST:
			case RTN_LOCAL:
			case RTN_BROADCAST:
			case RTN_ANYCAST:
			case RTN_MULTICAST:
				for_nexthops(fi) {
					if (nh->nh_flags & RTNH_F_DEAD)
						continue;
					if (!flp->oif || flp->oif == nh->nh_oif)
						break;
				}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				if (nhsel < fi->fib_nhs) {
					nh_sel = nhsel;
					goto out_fill_res;
				}
#else
				if (nhsel < 1)
					goto out_fill_res;
#endif
				endfor_nexthops(fi);
				continue;

			default:
				pr_warning("fib_semantic_match bad type %#x\n",
					   fa->fa_type);
				return -EINVAL;
			}
		}
		return err;
	}
	return 1;

out_fill_res:
	res->prefixlen = prefixlen;
	res->nh_sel = nh_sel;
	res->type = fa->fa_type;
	res->scope = fa->fa_scope;
	res->fi = fa->fa_info;
	if (!(fib_flags & FIB_LOOKUP_NOREF))
		atomic_inc(&res->fi->fib_clntref);
	return 0;
}

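/*
 * Return convention above, reading the code: 0 means a usable route was
 * found and *res filled in; 1 means no alias in this list matched (the
 * caller keeps scanning less specific prefixes); a negative value is
 * the semantic error from fib_props[], e.g. -EHOSTUNREACH for an
 * RTN_UNREACHABLE route.
 */
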
/* Find an appropriate source address for this destination */
__be32 __fib_res_prefsrc(struct fib_result *res)
{
	return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
}

int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	if (tb_id < 256)
		rtm->rtm_table = tb_id;
	else
		rtm->rtm_table = RT_TABLE_COMPAT;
	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len)
		NLA_PUT_BE32(skb, RTA_DST, dst);

	if (fi->fib_priority)
		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc)
		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);

		if (fi->fib_nh->nh_oif)
			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
#ifdef CONFIG_NET_CLS_ROUTE
		if (fi->fib_nh[0].nh_tclassid)
			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (mp == NULL)
			goto nla_put_failure;

		for_nexthops(fi) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (rtnh == NULL)
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
			rtnh->rtnh_hops = nh->nh_weight - 1;
			rtnh->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw)
				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
#ifdef CONFIG_NET_CLS_ROUTE
			if (nh->nh_tclassid)
				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);
	}
#endif
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Update the FIB if:
 * - a local address disappeared -> we must delete all the entries
 *   referring to it.
 * - a device went down -> we must shut down all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
	int ret = 0;
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct hlist_node *node;
	struct fib_info *fi;

	if (fib_info_laddrhash == NULL || local == 0)
		return 0;

	hlist_for_each_entry(fi, node, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net))
			continue;
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}
	return ret;
}

int fib_sync_down_dev(struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];
	struct hlist_node *node;
	struct fib_nh *nh;

	if (force)
		scope = -1;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int dead;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;
		prev_fi = fi;
		dead = 0;
		change_nexthops(fi) {
			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
				dead++;
			else if (nexthop_nh->nh_dev == dev &&
				 nexthop_nh->nh_scope != scope) {
				nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				spin_lock_bh(&fib_multipath_lock);
				fi->fib_power -= nexthop_nh->nh_power;
				nexthop_nh->nh_power = 0;
				spin_unlock_bh(&fib_multipath_lock);
#endif
				dead++;
			}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (force > 1 && nexthop_nh->nh_dev == dev) {
				dead = fi->fib_nhs;
				break;
			}
#endif
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

/*
 * A dead device goes up. We wake up dead nexthops.
 * This makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev)
{
	struct fib_info *prev_fi;
	unsigned int hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct fib_nh *nh;
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	prev_fi = NULL;
	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	ret = 0;

	hlist_for_each_entry(nh, node, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;
		int alive;

		BUG_ON(!fi->fib_nhs);
		if (nh->nh_dev != dev || fi == prev_fi)
			continue;

		prev_fi = fi;
		alive = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nexthop_nh->nh_dev == NULL ||
			    !(nexthop_nh->nh_dev->flags & IFF_UP))
				continue;
			if (nexthop_nh->nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))
				continue;
			alive++;
			spin_lock_bh(&fib_multipath_lock);
			nexthop_nh->nh_power = 0;
			nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&fib_multipath_lock);
		} endfor_nexthops(fi)

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	}

	return ret;
}

/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 */
void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
{
	struct fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
				power += nexthop_nh->nh_weight;
				nexthop_nh->nh_power = nexthop_nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power <= 0) {
			spin_unlock_bh(&fib_multipath_lock);
			/* Race condition: route has just become dead. */
			res->nh_sel = 0;
			return;
		}
	}

	/* w should be a random number in [0..fi->fib_power-1];
	 * this is a pretty bad approximation.
	 */
	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
		    nexthop_nh->nh_power) {
			w -= nexthop_nh->nh_power;
			if (w <= 0) {
				nexthop_nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);

	/* Race condition: route has just become dead. */
	res->nh_sel = 0;
	spin_unlock_bh(&fib_multipath_lock);
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */

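/*
 * Worked example (illustrative): two alive nexthops with weights 2 and
 * 1 start a round with nh_power = 2 and 1 and fib_power = 3. Each
 * selection decrements the chosen nexthop's nh_power and fib_power, so
 * a full round yields the first hop twice and the second once (a 2:1
 * split) before fib_power reaches 0 and the credits are refilled.
 */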