// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */
#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>
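/* Nexthop objects are created, replaced, deleted and dumped over rtnetlink
 * (RTM_NEWNEXTHOP, RTM_DELNEXTHOP, RTM_GETNEXTHOP) and are referenced from
 * IPv4/IPv6 routes by id. Illustrative iproute2 usage (syntax per
 * ip-nexthop(8); the ids and addresses below are only examples):
 *
 *   ip nexthop add id 1 via 192.0.2.1 dev eth0
 *   ip nexthop add id 2 via 192.0.2.2 dev eth1
 *   ip nexthop add id 10 group 1/2,3        (multipath group, weighted)
 *   ip route add 198.51.100.0/24 nhid 10
 */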
static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
};
static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}
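/* nh_info entries are hashed by the ifindex of their device so that
 * netdevice events (see nexthop_flush_dev() and nexthop_sync_mtu() below)
 * can find all nexthops on a given device without walking the id tree.
 */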
static void nexthop_free_mpath(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i)
		WARN_ON(nhg->nh_entries[i].nh);

	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_mpath(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);
static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	size_t sz = offsetof(struct nexthop, nh_grp)
		    + sizeof(struct nh_group)
		    + sizeof(struct nh_grp_entry) * num_nh;
	struct nh_group *nhg;

	nhg = kzalloc(sz, GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}
/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}
static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->mpath)
		group_type = NEXTHOP_GRP_TYPE_MPATH;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
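/* Note on weights: userspace passes 0..254 in struct nexthop_grp, which is
 * stored internally as weight + 1 (see nexthop_create_group()), i.e. an
 * effective weight of 1..255; the dump above converts back with weight - 1.
 */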
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nla_put_nh_group(skb, nhg))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_u32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;

	return nla_total_size(sz) +
	       nla_total_size(2); /* NHA_GROUP_TYPE */
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}
static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}
static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* nested multipath (group within a group) is not
		 * supported
		 */
		if (nhg->mpath) {
			NL_SET_ERR_MSG(extack,
				       "Multipath group can not be a nexthop within a group");
			return false;
		}
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
	}

	return true;
}

static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
			       struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, extack))
			return -EINVAL;
	}
	for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}
static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}
struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nexthop *rc = NULL;
	struct nh_group *nhg;
	int i;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		if (hash > atomic_read(&nhge->upper_bound))
			continue;

		/* nexthops always check if it is good and does
		 * not rely on a sysctl for this behavior
		 */
		nhi = rcu_dereference(nhge->nh->nh_info);
		switch (nhi->family) {
		case AF_INET:
			if (ipv4_good_nh(&nhi->fib_nh))
				return nhge->nh;
			break;
		case AF_INET6:
			if (ipv6_good_nh(&nhi->fib6_nh))
				return nhge->nh;
			break;
		}

		if (!rc)
			rc = nhge->nh;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
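/* Selection above is hash-threshold based: the first entry whose upper_bound
 * covers the flow hash wins if its neighbour is valid; otherwise the first
 * in-range entry is remembered as a fallback so a path is always returned.
 */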
int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}
static int nexthop_check_scope(struct nexthop *nh, u8 scope,
			       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		nhg = rtnl_dereference(nh->nh_grp);
		/* all nexthops in a group have the same scope */
		err = nexthop_check_scope(nhg->nh_entries[0].nh, scope, extack);
	} else {
		err = nexthop_check_scope(nh, scope, extack);
	}
out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}
static void nh_group_rebalance(struct nh_group *nhg)
{
	int total = 0;
	int w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->upper_bound, upper_bound);
	}
}
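/* Example (weights 1 and 3): total = 4, so the cumulative upper bounds are
 * roughly 2^31/4 - 1 and 2^31 - 1; a flow hash below 2^31/4 selects the
 * first entry (~25%) and anything above it selects the second (~75%).
 */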
static void remove_nh_grp_entry(struct nh_grp_entry *nhge,
				struct nh_group *nhg,
				struct nl_info *nlinfo)
{
	struct nexthop *nh = nhge->nh;
	struct nh_grp_entry *nhges;
	bool found = false;
	int i;

	WARN_ON(!nh);

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; ++i) {
		if (found) {
			nhges[i-1].nh = nhges[i].nh;
			nhges[i-1].weight = nhges[i].weight;
			list_del(&nhges[i].nh_list);
			list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
		} else if (nhg->nh_entries[i].nh == nh) {
			found = true;
		}
	}

	if (WARN_ON(!found))
		return;

	nhg->num_nh--;
	nhg->nh_entries[nhg->num_nh].nh = NULL;

	nh_group_rebalance(nhg);

	nexthop_put(nh);

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo);
}
static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) {
		struct nh_group *nhg;

		list_del(&nhge->nh_list);
		nhg = rtnl_dereference(nhge->nh_parent->nh_grp);
		remove_nh_grp_entry(nhge, nhg, nlinfo);

		/* if this group has no more entries then remove it */
		if (!nhg->num_nh)
			remove_nexthop(net, nhge->nh_parent, nlinfo);
	}
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del(&nhge->nh_list);
		nexthop_put(nhge->nh);
		nhge->nh = NULL;
		nhg->num_nh--;
	}
}
/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i);
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}
static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}

/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);
}
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new,
			       struct netlink_ext_ack *extack)
{
	struct nh_group *oldg, *newg;
	int i;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	struct nh_info *oldi, *newi;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	return 0;
}
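/* Replace works by swapping payloads rather than tree nodes: the old nexthop
 * keeps its id and rb-tree position but takes over the new nh_info/nh_grp,
 * while the new nexthop inherits the old data and is torn down by the caller
 * (see replace_nexthop()).
 */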
static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}
static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}
/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = rtnl_dereference(*pp);
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);
	rc = 0;
out:
	if (!rc) {
		nh_base_seq_inc(net);
		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
		if (replace_notify)
			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
	}

	return rc;
}
/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}
static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	struct nh_group *nhg;
	struct nexthop *nh;
	int i;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe))
			goto out_no_nh;

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->mpath = 1;
		nh_group_rebalance(nhg);
	}

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	/* only drop references actually taken; entry i itself was not set */
	for (i--; i >= 0; --i)
		nexthop_put(nhg->nh_entries[i].nh);

	kfree(nhg);
	kfree(nh);

	return ERR_PTR(-ENOENT);
}
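/* Group creation requires every member id to already exist:
 * nexthop_find_by_id() plus nexthop_get() take a reference on each member,
 * and the entry weight is stored as the user supplied weight + 1 so that a
 * weight of 0 from userspace means 1.
 */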
static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif   = cfg->nh_ifindex,
		.fc_gw4   = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = l3mdev_fib_table(cfg->dev);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  fib_nh->fib_nh_scope);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err)
		ipv6_stub->fib6_nh_release(fib6_nh);
	else
		nh->nh_flags = fib6_nh->fib_nh_flags;

	return err;
}
static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}
/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}

static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		/* fallthrough */
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	if (tb[NHA_GROUPS] || tb[NHA_MASTER]) {
		NL_SET_ERR_MSG(extack, "Invalid attributes in request");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, extack);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway or oif");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole nexthops");
		goto out;
	}

	cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
	if (cfg->nh_ifindex)
		cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

	if (!cfg->dev) {
		NL_SET_ERR_MSG(extack, "Invalid device index");
		goto out;
	} else if (!(cfg->dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	} else if (!netif_carrier_ok(cfg->dev)) {
		NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
		err = -ENETDOWN;
		goto out;
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	err = 0;
out:
	return err;
}

static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}

static int nh_valid_get_del_req(struct nlmsghdr *nlh, u32 *id,
				struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err, i;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	for (i = 0; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_ID:
			break;
		default:
			NL_SET_ERR_MSG_ATTR(extack, tb[i],
					    "Unexpected attribute in request");
			goto out;
		}
	}
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		goto out;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		goto out;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id))
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
	else
		err = 0;
out:
	return err;
}

static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}

static bool nh_dump_filtered(struct nexthop *nh, int dev_idx, int master_idx,
			     bool group_filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (group_filter && !nh->is_group)
		return true;

	if (!dev_idx && !master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (dev_idx && (!dev || dev->ifindex != dev_idx))
		return true;

	if (master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != master_idx)
			return true;
	}

	return false;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
			     int *master_idx, bool *group_filter,
			     struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NHA_MAX + 1];
	struct nhmsg *nhm;
	int err, i;
	u32 idx;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  NULL);
	if (err < 0)
		return err;

	for (i = 0; i <= NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_OIF:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid device index");
				return -EINVAL;
			}
			*dev_idx = idx;
			break;
		case NHA_MASTER:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid master device index");
				return -EINVAL;
			}
			*master_idx = idx;
			break;
		case NHA_GROUPS:
			*group_filter = true;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	int dev_filter_idx = 0, master_idx = 0;
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	bool group_filter = false;
	struct rb_node *node;
	int idx = 0, s_idx;
	int err;

	err = nh_valid_dump_req(cb->nlh, &dev_filter_idx, &master_idx,
				&group_filter, cb);
	if (err < 0)
		return err;

	s_idx = cb->args[0];
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		if (idx < s_idx)
			goto cont;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh_dump_filtered(nh, dev_filter_idx, master_idx,
				     group_filter, nhm->nh_family))
			goto cont;

		err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
				   NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err < 0) {
			if (likely(skb->len))
				goto out;

			goto out_err;
		}
cont:
		idx++;
	}

out:
	err = skb->len;
out_err:
	cb->args[0] = idx;
	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	return err;
}

static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static void __net_exit nexthop_net_exit(struct net *net)
{
	rtnl_lock();
	flush_all_nexthops(net);
	rtnl_unlock();
	kfree(net->nexthop.devhash);
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	return 0;
}
subsys_initcall(nexthop_init);