// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
};
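/* Illustrative mapping of the attributes above to iproute2 commands (not
 * part of this file; exact syntax depends on the iproute2 version):
 *
 *   ip nexthop add id 1 via 10.0.0.2 dev eth0  -> NHA_ID, NHA_GATEWAY, NHA_OIF
 *   ip nexthop add id 2 blackhole              -> NHA_ID, NHA_BLACKHOLE
 *   ip nexthop add id 10 group 1/2             -> NHA_ID, NHA_GROUP
 *   ip nexthop list groups                     -> dump filtered by NHA_GROUPS
 */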
static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
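/* Worked example of the fold above: for ifindex 0x1234 the bucket is
 * (0x1234 ^ 0x12 ^ 0x00) & 0xff = 0x26, i.e. the low, middle and high
 * 8-bit chunks of the ifindex XORed together and masked to one of the
 * NH_DEV_HASHSIZE buckets.
 */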
static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_mpath(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i)
		WARN_ON(nhg->nh_entries[i].nh);

	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_mpath(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	size_t sz = offsetof(struct nexthop, nh_grp)
		    + sizeof(struct nh_group)
		    + sizeof(struct nh_grp_entry) * num_nh;
	struct nh_group *nhg;

	nhg = kzalloc(sz, GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->mpath)
		group_type = NEXTHOP_GRP_TYPE_MPATH;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
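/* Note on weights: struct nexthop_grp carries a zero-based weight on the
 * netlink wire (0 means one path's worth), so the internal weight is
 * decremented by one when dumped above and incremented by one when a group
 * is created from user input in nexthop_create_group().
 */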
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nla_put_nh_group(skb, nhg))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_u32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;

	return nla_total_size(sz) +
	       nla_total_size(2); /* NHA_GROUP_TYPE */
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}
static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* nested multipath (group within a group) is not
		 * supported
		 */
		if (nhg->mpath) {
			NL_SET_ERR_MSG(extack,
				       "Multipath group can not be a nexthop within a group");
			return false;
		}
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
	}

	return true;
}

static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
			       struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, extack))
			return -EINVAL;
	}
	for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}
static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nexthop *rc = NULL;
	struct nh_group *nhg;
	int i;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		if (hash > atomic_read(&nhge->upper_bound))
			continue;

		/* nexthops always check if it is good and does
		 * not rely on a sysctl for this behavior
		 */
		nhi = rcu_dereference(nhge->nh->nh_info);
		switch (nhi->family) {
		case AF_INET:
			if (ipv4_good_nh(&nhi->fib_nh))
				return nhge->nh;
			break;
		case AF_INET6:
			if (ipv6_good_nh(&nhi->fib6_nh))
				return nhge->nh;
			break;
		}

		if (!rc)
			rc = nhge->nh;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
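/* Selection sketch: with two entries whose upper bounds are 0x2aaaaaaa and
 * 0x7fffffff (internal weights 1 and 2, see nh_group_rebalance() below), a
 * flow hash of 0x30000000 skips entry 0 (hash > upper_bound) and selects
 * entry 1 if its neighbour is valid; the first entry considered is kept in
 * 'rc' as a fallback when no entry has a valid neighbour.
 */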
int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}
static int nexthop_check_scope(struct nexthop *nh, u8 scope,
			       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		nhg = rtnl_dereference(nh->nh_grp);
		/* all nexthops in a group have the same scope */
		err = nexthop_check_scope(nhg->nh_entries[0].nh, scope, extack);
	} else {
		err = nexthop_check_scope(nh, scope, extack);
	}
out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static void nh_group_rebalance(struct nh_group *nhg)
{
	int total = 0;
	int w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->upper_bound, upper_bound);
	}
}
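/* Worked example for the rebalance above: internal weights {1, 2} give
 * total = 3, so
 *   upper_bound[0] = DIV_ROUND_CLOSEST_ULL(1ULL << 31, 3) - 1 = 0x2aaaaaaa
 *   upper_bound[1] = DIV_ROUND_CLOSEST_ULL(3ULL << 31, 3) - 1 = 0x7fffffff
 * i.e. roughly 1/3 and the remaining 2/3 of the 31-bit hash space.
 */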
static void remove_nh_grp_entry(struct nh_grp_entry *nhge,
				struct nh_group *nhg,
				struct nl_info *nlinfo)
{
	struct nexthop *nh = nhge->nh;
	struct nh_grp_entry *nhges;
	bool found = false;
	int i;

	WARN_ON(!nh);

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; ++i) {
		if (found) {
			nhges[i-1].nh = nhges[i].nh;
			nhges[i-1].weight = nhges[i].weight;
			list_del(&nhges[i].nh_list);
			list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
		} else if (nhg->nh_entries[i].nh == nh) {
			found = true;
		}
	}

	if (WARN_ON(!found))
		return;

	nhg->num_nh--;
	nhg->nh_entries[nhg->num_nh].nh = NULL;

	nh_group_rebalance(nhg);

	nexthop_put(nh);

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo);
}

static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) {
		struct nh_group *nhg;

		list_del(&nhge->nh_list);
		nhg = rtnl_dereference(nhge->nh_parent->nh_grp);
		remove_nh_grp_entry(nhge, nhg, nlinfo);

		/* if this group has no more entries then remove it */
		if (!nhg->num_nh)
			remove_nexthop(net, nhge->nh_parent, nlinfo);
	}
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del(&nhge->nh_list);
		nexthop_put(nhge->nh);
		nhge->nh = NULL;
		nhg->num_nh--;
	}
}
/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i,
				      !net->ipv4.sysctl_nexthop_compat_mode);
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}

/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);
}
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new,
			       struct netlink_ext_ack *extack)
{
	struct nh_group *oldg, *newg;
	int i;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	struct nh_info *oldi, *newi;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	return 0;
}

static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}
static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}

/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = rtnl_dereference(*pp);
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);
	rc = 0;
out:
	if (!rc) {
		nh_base_seq_inc(net);
		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
		if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
	}

	return rc;
}
/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}

static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	struct nh_group *nhg;
	struct nexthop *nh;
	int i;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe))
			goto out_no_nh;

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->mpath = 1;
		nh_group_rebalance(nhg);
	}

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (; i >= 0; --i)
		nexthop_put(nhg->nh_entries[i].nh);

	kfree(nhg);
	kfree(nh);

	return ERR_PTR(-ENOENT);
}
static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif   = cfg->nh_ifindex,
		.fc_gw4   = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = l3mdev_fib_table(cfg->dev);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  fib_nh->fib_nh_scope);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

static int nh_create_ipv6(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err)
		ipv6_stub->fib6_nh_release(fib6_nh);
	else
		nh->nh_flags = fib6_nh->fib_nh_flags;

	return err;
}

static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}
/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;
	nh->net = net;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}
static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		/* fallthrough */
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	if (tb[NHA_GROUPS] || tb[NHA_MASTER]) {
		NL_SET_ERR_MSG(extack, "Invalid attributes in request");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, extack);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway or oif");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole nexthops");
		goto out;
	}

	cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
	if (cfg->nh_ifindex)
		cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

	if (!cfg->dev) {
		NL_SET_ERR_MSG(extack, "Invalid device index");
		goto out;
	} else if (!(cfg->dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	} else if (!netif_carrier_ok(cfg->dev)) {
		NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
		err = -ENETDOWN;
		goto out;
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	err = 0;
out:
	return err;
}
static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}

static int nh_valid_get_del_req(struct nlmsghdr *nlh, u32 *id,
				struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err, i;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	for (i = 0; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_ID:
			break;
		default:
			NL_SET_ERR_MSG_ATTR(extack, tb[i],
					    "Unexpected attribute in request");
			goto out;
		}
	}
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		goto out;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		goto out;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id))
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
	else
		err = 0;
out:
	return err;
}

static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}
static bool nh_dump_filtered(struct nexthop *nh, int dev_idx, int master_idx,
			     bool group_filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (group_filter && !nh->is_group)
		return true;

	if (!dev_idx && !master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (dev_idx && (!dev || dev->ifindex != dev_idx))
		return true;

	if (master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != master_idx)
			return true;
	}

	return false;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
			     int *master_idx, bool *group_filter,
			     struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NHA_MAX + 1];
	struct nhmsg *nhm;
	int err, i;
	u32 idx;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  NULL);
	if (err < 0)
		return err;

	for (i = 0; i <= NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_OIF:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid device index");
				return -EINVAL;
			}
			*dev_idx = idx;
			break;
		case NHA_MASTER:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid master device index");
				return -EINVAL;
			}
			*master_idx = idx;
			break;
		case NHA_GROUPS:
			*group_filter = true;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	int dev_filter_idx = 0, master_idx = 0;
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	bool group_filter = false;
	struct rb_node *node;
	int idx = 0, s_idx;
	int err;

	err = nh_valid_dump_req(cb->nlh, &dev_filter_idx, &master_idx,
				&group_filter, cb);
	if (err < 0)
		return err;

	s_idx = cb->args[0];
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		if (idx < s_idx)
			goto cont;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh_dump_filtered(nh, dev_filter_idx, master_idx,
				     group_filter, nhm->nh_family))
			goto cont;

		err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
				   NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err < 0) {
			if (likely(skb->len))
				goto out;

			goto out_err;
		}
cont:
		idx++;
	}

out:
	err = skb->len;
out_err:
	cb->args[0] = idx;
	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	return err;
}
static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static void __net_exit nexthop_net_exit(struct net *net)
{
	rtnl_lock();
	flush_all_nexthops(net);
	rtnl_unlock();
	kfree(net->nexthop.devhash);
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	return 0;
}
subsys_initcall(nexthop_init);