// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
};

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}

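/* Example: the device hash folds the ifindex down to NH_DEV_HASHBITS (8)
 * bits by XORing successive 8-bit chunks, so ifindex 0x1234 maps to
 * (0x1234 ^ 0x12 ^ 0x00) & 0xff = 0x26, i.e. bucket 38 of the 256-entry
 * devhash table.
 */
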
static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_mpath(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i)
		WARN_ON(nhg->nh_entries[i].nh);

	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_mpath(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	size_t sz = offsetof(struct nexthop, nh_grp)
		    + sizeof(struct nh_group)
		    + sizeof(struct nh_grp_entry) * num_nh;
	struct nh_group *nhg;

	nhg = kzalloc(sz, GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

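/* Sizing note: a group is a single allocation with nhg->nh_entries[] as a
 * trailing array of num_nh entries, so e.g. num_nh == 2 reserves room for
 * struct nh_group plus two struct nh_grp_entry slots.
 */
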
static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}

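/* Example: if the last allocated id was 5, ids 6, 7, ... are probed in turn
 * (wrapping around at the top of the u32 space) until one is not found in
 * the rb-tree; 0 is returned only if the search wraps all the way back to
 * its starting point, i.e. every id is in use.
 */
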
static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->mpath)
		group_type = NEXTHOP_GRP_TYPE_MPATH;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

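/* Weight encoding note: struct nexthop_grp carries weights as 0-254 on the
 * netlink wire, while the kernel stores them internally as 1-255. A wire
 * weight of 0 is stored as weight 1 and dumped back as 0 again, hence the
 * "weight - 1" above and the matching "entry[i].weight + 1" in
 * nexthop_create_group().
 */
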
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nla_put_nh_group(skb, nhg))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_u32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;

	return nla_total_size(sz) +
	       nla_total_size(2); /* NHA_GROUP_TYPE */
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = nla_total_size(4);    /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* nested multipath (group within a group) is not
		 * supported
		 */
		if (nhg->mpath) {
			NL_SET_ERR_MSG(extack,
				       "Multipath group can not be a nexthop within a group");
			return false;
		}
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
	}

	return true;
}

static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
			       struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, extack))
			return -EINVAL;
	}
	for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock_bh();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = n->nud_state;

	rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nexthop *rc = NULL;
	struct nh_group *nhg;
	int i;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		if (hash > atomic_read(&nhge->upper_bound))
			continue;

		/* nexthops always check if it is good and does
		 * not rely on a sysctl for this behavior
		 */
		nhi = rcu_dereference(nhge->nh->nh_info);
		switch (nhi->family) {
		case AF_INET:
			if (ipv4_good_nh(&nhi->fib_nh))
				return nhge->nh;
			break;
		case AF_INET6:
			if (ipv6_good_nh(&nhi->fib6_nh))
				return nhge->nh;
			break;
		}

		if (!rc)
			rc = nhge->nh;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

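/* Selection example: with two entries whose upper bounds are 0x1fffffff and
 * 0x7fffffff, a flow hash of 0x10000000 lands on entry 0 and 0x30000000 on
 * entry 1. An in-range entry is returned immediately only if its neighbour
 * state is valid; otherwise later entries are tried and the first in-range
 * one is kept as the fallback return value.
 */
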
int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nexthop *nh, u8 scope,
			       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		nhg = rtnl_dereference(nh->nh_grp);
		/* all nexthops in a group have the same scope */
		err = nexthop_check_scope(nhg->nh_entries[0].nh, scope, extack);
	} else {
		err = nexthop_check_scope(nh, scope, extack);
	}
out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static void nh_group_rebalance(struct nh_group *nhg)
{
	int total = 0;
	int w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->upper_bound, upper_bound);
	}
}

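/* Rebalance example: for two entries with weights 1 and 3, total = 4. The
 * cumulative upper bounds become (2^31 * 1/4) - 1 = 0x1fffffff and
 * (2^31 * 4/4) - 1 = 0x7fffffff, so hashes 0..0x1fffffff (~25%) map to the
 * first entry and the remainder (~75%) to the second.
 */
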
static void remove_nh_grp_entry(struct nh_grp_entry *nhge,
				struct nh_group *nhg,
				struct nl_info *nlinfo)
{
	struct nexthop *nh = nhge->nh;
	struct nh_grp_entry *nhges;
	bool found = false;
	int i;

	WARN_ON(!nh);

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; ++i) {
		if (found) {
			nhges[i-1].nh = nhges[i].nh;
			nhges[i-1].weight = nhges[i].weight;
			list_del(&nhges[i].nh_list);
			list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
		} else if (nhg->nh_entries[i].nh == nh) {
			found = true;
		}
	}

	if (WARN_ON(!found))
		return;

	nhg->num_nh--;
	nhg->nh_entries[nhg->num_nh].nh = NULL;

	nh_group_rebalance(nhg);

	nexthop_put(nh);

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo);
}

static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) {
		struct nh_group *nhg;

		list_del(&nhge->nh_list);
		nhg = rtnl_dereference(nhge->nh_parent->nh_grp);
		remove_nh_grp_entry(nhge, nhg, nlinfo);

		/* if this group has no more entries then remove it */
		if (!nhg->num_nh)
			remove_nexthop(net, nhge->nh_parent, nlinfo);
	}
}

static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del(&nhge->nh_list);
		nexthop_put(nhge->nh);
	}
}

/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i);
	}
}

static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}

/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);
}

static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new,
			       struct netlink_ext_ack *extack)
{
	struct nh_group *oldg, *newg;
	int i;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}

static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	struct nh_info *oldi, *newi;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	return 0;
}

static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}

/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}

static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}

/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = rtnl_dereference(*pp);
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);
	rc = 0;
out:
	if (!rc) {
		nh_base_seq_inc(net);
		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
		if (replace_notify)
			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
	}

	return rc;
}

/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
	}
}

static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	struct nh_group *nhg;
	struct nexthop *nh;
	int i;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe))
			goto out_no_nh;

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->mpath = 1;
		nh_group_rebalance(nhg);
	}

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (; i >= 0; --i)
		nexthop_put(nhg->nh_entries[i].nh);

	kfree(nhg);
	kfree(nh);

	return ERR_PTR(-ENOENT);
}

static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif   = cfg->nh_ifindex,
		.fc_gw4   = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = l3mdev_fib_table(cfg->dev);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  fib_nh->fib_nh_scope);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err)
		ipv6_stub->fib6_nh_release(fib6_nh);
	else
		nh->nh_flags = fib6_nh->fib_nh_flags;

	return err;
}

static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}

/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}

static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		/* fallthrough */
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	if (tb[NHA_GROUPS] || tb[NHA_MASTER]) {
		NL_SET_ERR_MSG(extack, "Invalid attributes in request");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, extack);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway or oif");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole nexthops");
		goto out;
	}

	cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
	if (cfg->nh_ifindex)
		cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

	if (!cfg->dev) {
		NL_SET_ERR_MSG(extack, "Invalid device index");
		goto out;
	} else if (!(cfg->dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	} else if (!netif_carrier_ok(cfg->dev)) {
		NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
		err = -ENETDOWN;
		goto out;
	}

	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	err = 0;
out:
	return err;
}

static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}

static int nh_valid_get_del_req(struct nlmsghdr *nlh, u32 *id,
				struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[NHA_MAX + 1];
	int err, i;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	for (i = 0; i < __NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_ID:
			break;
		default:
			NL_SET_ERR_MSG_ATTR(extack, tb[i],
					    "Unexpected attribute in request");
			goto out;
		}
	}
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		goto out;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		goto out;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id))
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
	else
		err = 0;
out:
	return err;
}

static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	int err;
	u32 id;

	err = nh_valid_get_del_req(nlh, &id, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto out;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}

static bool nh_dump_filtered(struct nexthop *nh, int dev_idx, int master_idx,
			     bool group_filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (group_filter && !nh->is_group)
		return true;

	if (!dev_idx && !master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (dev_idx && (!dev || dev->ifindex != dev_idx))
		return true;

	if (master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != master_idx)
			return true;
	}

	return false;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
			     int *master_idx, bool *group_filter,
			     struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NHA_MAX + 1];
	struct nhmsg *nhm;
	int err, i;
	u32 idx;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
			  NULL);
	if (err < 0)
		return err;

	for (i = 0; i <= NHA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NHA_OIF:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid device index");
				return -EINVAL;
			}
			*dev_idx = idx;
			break;
		case NHA_MASTER:
			idx = nla_get_u32(tb[i]);
			if (idx > INT_MAX) {
				NL_SET_ERR_MSG(extack, "Invalid master device index");
				return -EINVAL;
			}
			*master_idx = idx;
			break;
		case NHA_GROUPS:
			*group_filter = true;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	int dev_filter_idx = 0, master_idx = 0;
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	bool group_filter = false;
	struct rb_node *node;
	int idx = 0, s_idx;
	int err;

	err = nh_valid_dump_req(cb->nlh, &dev_filter_idx, &master_idx,
				&group_filter, cb);
	if (err < 0)
		return err;

	s_idx = cb->args[0];
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		if (idx < s_idx)
			goto cont;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh_dump_filtered(nh, dev_filter_idx, master_idx,
				     group_filter, nhm->nh_family))
			goto cont;

		err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
				   NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err < 0) {
			if (likely(skb->len))
				goto out;

			goto out_err;
		}
cont:
		idx++;
	}

out:
	err = skb->len;
out_err:
	cb->args[0] = idx;
	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	return err;
}

static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static void __net_exit nexthop_net_exit(struct net *net)
{
	rtnl_lock();
	flush_all_nexthops(net);
	rtnl_unlock();
	kfree(net->nexthop.devhash);
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	return 0;
}
subsys_initcall(nexthop_init);