/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
11 #include <linux/types.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <net/net_namespace.h>
18 #include <net/fib_rules.h>
19 #include <net/ip_tunnels.h>
21 static const struct fib_kuid_range fib_kuid_range_unset
= {
26 bool fib_rule_matchall(const struct fib_rule
*rule
)
28 if (rule
->iifindex
|| rule
->oifindex
|| rule
->mark
|| rule
->tun_id
||
31 if (rule
->suppress_ifgroup
!= -1 || rule
->suppress_prefixlen
!= -1)
33 if (!uid_eq(rule
->uid_range
.start
, fib_kuid_range_unset
.start
) ||
34 !uid_eq(rule
->uid_range
.end
, fib_kuid_range_unset
.end
))
36 if (fib_rule_port_range_set(&rule
->sport_range
))
38 if (fib_rule_port_range_set(&rule
->dport_range
))
42 EXPORT_SYMBOL_GPL(fib_rule_matchall
);
44 int fib_default_rule_add(struct fib_rules_ops
*ops
,
45 u32 pref
, u32 table
, u32 flags
)
49 r
= kzalloc(ops
->rule_size
, GFP_KERNEL
);
53 refcount_set(&r
->refcnt
, 1);
54 r
->action
= FR_ACT_TO_TBL
;
58 r
->proto
= RTPROT_KERNEL
;
59 r
->fr_net
= ops
->fro_net
;
60 r
->uid_range
= fib_kuid_range_unset
;
62 r
->suppress_prefixlen
= -1;
63 r
->suppress_ifgroup
= -1;
65 /* The lock is not required here, the list in unreacheable
66 * at the moment this function is called */
67 list_add_tail(&r
->list
, &ops
->rules_list
);
70 EXPORT_SYMBOL(fib_default_rule_add
);
72 static u32
fib_default_rule_pref(struct fib_rules_ops
*ops
)
74 struct list_head
*pos
;
75 struct fib_rule
*rule
;
77 if (!list_empty(&ops
->rules_list
)) {
78 pos
= ops
->rules_list
.next
;
79 if (pos
->next
!= &ops
->rules_list
) {
80 rule
= list_entry(pos
->next
, struct fib_rule
, list
);
82 return rule
->pref
- 1;
89 static void notify_rule_change(int event
, struct fib_rule
*rule
,
90 struct fib_rules_ops
*ops
, struct nlmsghdr
*nlh
,
93 static struct fib_rules_ops
*lookup_rules_ops(struct net
*net
, int family
)
95 struct fib_rules_ops
*ops
;
98 list_for_each_entry_rcu(ops
, &net
->rules_ops
, list
) {
99 if (ops
->family
== family
) {
100 if (!try_module_get(ops
->owner
))
111 static void rules_ops_put(struct fib_rules_ops
*ops
)
114 module_put(ops
->owner
);
117 static void flush_route_cache(struct fib_rules_ops
*ops
)
119 if (ops
->flush_cache
)
120 ops
->flush_cache(ops
);
123 static int __fib_rules_register(struct fib_rules_ops
*ops
)
126 struct fib_rules_ops
*o
;
131 if (ops
->rule_size
< sizeof(struct fib_rule
))
134 if (ops
->match
== NULL
|| ops
->configure
== NULL
||
135 ops
->compare
== NULL
|| ops
->fill
== NULL
||
139 spin_lock(&net
->rules_mod_lock
);
140 list_for_each_entry(o
, &net
->rules_ops
, list
)
141 if (ops
->family
== o
->family
)
144 list_add_tail_rcu(&ops
->list
, &net
->rules_ops
);
147 spin_unlock(&net
->rules_mod_lock
);
152 struct fib_rules_ops
*
153 fib_rules_register(const struct fib_rules_ops
*tmpl
, struct net
*net
)
155 struct fib_rules_ops
*ops
;
158 ops
= kmemdup(tmpl
, sizeof(*ops
), GFP_KERNEL
);
160 return ERR_PTR(-ENOMEM
);
162 INIT_LIST_HEAD(&ops
->rules_list
);
165 err
= __fib_rules_register(ops
);
173 EXPORT_SYMBOL_GPL(fib_rules_register
);
175 static void fib_rules_cleanup_ops(struct fib_rules_ops
*ops
)
177 struct fib_rule
*rule
, *tmp
;
179 list_for_each_entry_safe(rule
, tmp
, &ops
->rules_list
, list
) {
180 list_del_rcu(&rule
->list
);
187 void fib_rules_unregister(struct fib_rules_ops
*ops
)
189 struct net
*net
= ops
->fro_net
;
191 spin_lock(&net
->rules_mod_lock
);
192 list_del_rcu(&ops
->list
);
193 spin_unlock(&net
->rules_mod_lock
);
195 fib_rules_cleanup_ops(ops
);
198 EXPORT_SYMBOL_GPL(fib_rules_unregister
);
200 static int uid_range_set(struct fib_kuid_range
*range
)
202 return uid_valid(range
->start
) && uid_valid(range
->end
);
205 static struct fib_kuid_range
nla_get_kuid_range(struct nlattr
**tb
)
207 struct fib_rule_uid_range
*in
;
208 struct fib_kuid_range out
;
210 in
= (struct fib_rule_uid_range
*)nla_data(tb
[FRA_UID_RANGE
]);
212 out
.start
= make_kuid(current_user_ns(), in
->start
);
213 out
.end
= make_kuid(current_user_ns(), in
->end
);
218 static int nla_put_uid_range(struct sk_buff
*skb
, struct fib_kuid_range
*range
)
220 struct fib_rule_uid_range out
= {
221 from_kuid_munged(current_user_ns(), range
->start
),
222 from_kuid_munged(current_user_ns(), range
->end
)
225 return nla_put(skb
, FRA_UID_RANGE
, sizeof(out
), &out
);
228 static int nla_get_port_range(struct nlattr
*pattr
,
229 struct fib_rule_port_range
*port_range
)
231 const struct fib_rule_port_range
*pr
= nla_data(pattr
);
233 if (!fib_rule_port_range_valid(pr
))
236 port_range
->start
= pr
->start
;
237 port_range
->end
= pr
->end
;
242 static int nla_put_port_range(struct sk_buff
*skb
, int attrtype
,
243 struct fib_rule_port_range
*range
)
245 return nla_put(skb
, attrtype
, sizeof(*range
), range
);
248 static int fib_rule_match(struct fib_rule
*rule
, struct fib_rules_ops
*ops
,
249 struct flowi
*fl
, int flags
,
250 struct fib_lookup_arg
*arg
)
254 if (rule
->iifindex
&& (rule
->iifindex
!= fl
->flowi_iif
))
257 if (rule
->oifindex
&& (rule
->oifindex
!= fl
->flowi_oif
))
260 if ((rule
->mark
^ fl
->flowi_mark
) & rule
->mark_mask
)
263 if (rule
->tun_id
&& (rule
->tun_id
!= fl
->flowi_tun_key
.tun_id
))
266 if (rule
->l3mdev
&& !l3mdev_fib_rule_match(rule
->fr_net
, fl
, arg
))
269 if (uid_lt(fl
->flowi_uid
, rule
->uid_range
.start
) ||
270 uid_gt(fl
->flowi_uid
, rule
->uid_range
.end
))
273 ret
= ops
->match(rule
, fl
, flags
);
275 return (rule
->flags
& FIB_RULE_INVERT
) ? !ret
: ret
;
278 int fib_rules_lookup(struct fib_rules_ops
*ops
, struct flowi
*fl
,
279 int flags
, struct fib_lookup_arg
*arg
)
281 struct fib_rule
*rule
;
286 list_for_each_entry_rcu(rule
, &ops
->rules_list
, list
) {
288 if (!fib_rule_match(rule
, ops
, fl
, flags
, arg
))
291 if (rule
->action
== FR_ACT_GOTO
) {
292 struct fib_rule
*target
;
294 target
= rcu_dereference(rule
->ctarget
);
295 if (target
== NULL
) {
301 } else if (rule
->action
== FR_ACT_NOP
)
304 err
= ops
->action(rule
, fl
, flags
, arg
);
306 if (!err
&& ops
->suppress
&& ops
->suppress(rule
, arg
))
309 if (err
!= -EAGAIN
) {
310 if ((arg
->flags
& FIB_LOOKUP_NOREF
) ||
311 likely(refcount_inc_not_zero(&rule
->refcnt
))) {
325 EXPORT_SYMBOL_GPL(fib_rules_lookup
);
327 static int call_fib_rule_notifier(struct notifier_block
*nb
, struct net
*net
,
328 enum fib_event_type event_type
,
329 struct fib_rule
*rule
, int family
)
331 struct fib_rule_notifier_info info
= {
332 .info
.family
= family
,
336 return call_fib_notifier(nb
, net
, event_type
, &info
.info
);
339 static int call_fib_rule_notifiers(struct net
*net
,
340 enum fib_event_type event_type
,
341 struct fib_rule
*rule
,
342 struct fib_rules_ops
*ops
,
343 struct netlink_ext_ack
*extack
)
345 struct fib_rule_notifier_info info
= {
346 .info
.family
= ops
->family
,
347 .info
.extack
= extack
,
351 ops
->fib_rules_seq
++;
352 return call_fib_notifiers(net
, event_type
, &info
.info
);
355 /* Called with rcu_read_lock() */
356 int fib_rules_dump(struct net
*net
, struct notifier_block
*nb
, int family
)
358 struct fib_rules_ops
*ops
;
359 struct fib_rule
*rule
;
361 ops
= lookup_rules_ops(net
, family
);
363 return -EAFNOSUPPORT
;
364 list_for_each_entry_rcu(rule
, &ops
->rules_list
, list
)
365 call_fib_rule_notifier(nb
, net
, FIB_EVENT_RULE_ADD
, rule
,
371 EXPORT_SYMBOL_GPL(fib_rules_dump
);
373 unsigned int fib_rules_seq_read(struct net
*net
, int family
)
375 unsigned int fib_rules_seq
;
376 struct fib_rules_ops
*ops
;
380 ops
= lookup_rules_ops(net
, family
);
383 fib_rules_seq
= ops
->fib_rules_seq
;
386 return fib_rules_seq
;
388 EXPORT_SYMBOL_GPL(fib_rules_seq_read
);
390 static struct fib_rule
*rule_find(struct fib_rules_ops
*ops
,
391 struct fib_rule_hdr
*frh
,
393 struct fib_rule
*rule
,
398 list_for_each_entry(r
, &ops
->rules_list
, list
) {
399 if (rule
->action
&& r
->action
!= rule
->action
)
402 if (rule
->table
&& r
->table
!= rule
->table
)
405 if (user_priority
&& r
->pref
!= rule
->pref
)
408 if (rule
->iifname
[0] &&
409 memcmp(r
->iifname
, rule
->iifname
, IFNAMSIZ
))
412 if (rule
->oifname
[0] &&
413 memcmp(r
->oifname
, rule
->oifname
, IFNAMSIZ
))
416 if (rule
->mark
&& r
->mark
!= rule
->mark
)
419 if (rule
->suppress_ifgroup
!= -1 &&
420 r
->suppress_ifgroup
!= rule
->suppress_ifgroup
)
423 if (rule
->suppress_prefixlen
!= -1 &&
424 r
->suppress_prefixlen
!= rule
->suppress_prefixlen
)
427 if (rule
->mark_mask
&& r
->mark_mask
!= rule
->mark_mask
)
430 if (rule
->tun_id
&& r
->tun_id
!= rule
->tun_id
)
433 if (r
->fr_net
!= rule
->fr_net
)
436 if (rule
->l3mdev
&& r
->l3mdev
!= rule
->l3mdev
)
439 if (uid_range_set(&rule
->uid_range
) &&
440 (!uid_eq(r
->uid_range
.start
, rule
->uid_range
.start
) ||
441 !uid_eq(r
->uid_range
.end
, rule
->uid_range
.end
)))
444 if (rule
->ip_proto
&& r
->ip_proto
!= rule
->ip_proto
)
447 if (rule
->proto
&& r
->proto
!= rule
->proto
)
450 if (fib_rule_port_range_set(&rule
->sport_range
) &&
451 !fib_rule_port_range_compare(&r
->sport_range
,
455 if (fib_rule_port_range_set(&rule
->dport_range
) &&
456 !fib_rule_port_range_compare(&r
->dport_range
,
460 if (!ops
->compare(r
, frh
, tb
))
#ifdef CONFIG_NET_L3_MASTER_DEV
/* Parse FRA_L3MDEV: only the value 1 ("use l3mdev table") is valid. */
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	nlrule->l3mdev = nla_get_u8(nla);
	if (nlrule->l3mdev != 1) {
		NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute");
		return -1;
	}

	return 0;
}
#else
/* Stub: reject FRA_L3MDEV when l3mdev support is compiled out. */
static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule,
			      struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel");
	return -1;
}
#endif
489 static int fib_nl2rule(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
490 struct netlink_ext_ack
*extack
,
491 struct fib_rules_ops
*ops
,
493 struct fib_rule
**rule
,
496 struct net
*net
= sock_net(skb
->sk
);
497 struct fib_rule_hdr
*frh
= nlmsg_data(nlh
);
498 struct fib_rule
*nlrule
= NULL
;
503 frh
->src_len
> (ops
->addr_size
* 8) ||
504 nla_len(tb
[FRA_SRC
]) != ops
->addr_size
) {
505 NL_SET_ERR_MSG(extack
, "Invalid source address");
511 frh
->dst_len
> (ops
->addr_size
* 8) ||
512 nla_len(tb
[FRA_DST
]) != ops
->addr_size
) {
513 NL_SET_ERR_MSG(extack
, "Invalid dst address");
517 nlrule
= kzalloc(ops
->rule_size
, GFP_KERNEL
);
522 refcount_set(&nlrule
->refcnt
, 1);
523 nlrule
->fr_net
= net
;
525 if (tb
[FRA_PRIORITY
]) {
526 nlrule
->pref
= nla_get_u32(tb
[FRA_PRIORITY
]);
527 *user_priority
= true;
529 nlrule
->pref
= fib_default_rule_pref(ops
);
532 nlrule
->proto
= tb
[FRA_PROTOCOL
] ?
533 nla_get_u8(tb
[FRA_PROTOCOL
]) : RTPROT_UNSPEC
;
535 if (tb
[FRA_IIFNAME
]) {
536 struct net_device
*dev
;
538 nlrule
->iifindex
= -1;
539 nla_strlcpy(nlrule
->iifname
, tb
[FRA_IIFNAME
], IFNAMSIZ
);
540 dev
= __dev_get_by_name(net
, nlrule
->iifname
);
542 nlrule
->iifindex
= dev
->ifindex
;
545 if (tb
[FRA_OIFNAME
]) {
546 struct net_device
*dev
;
548 nlrule
->oifindex
= -1;
549 nla_strlcpy(nlrule
->oifname
, tb
[FRA_OIFNAME
], IFNAMSIZ
);
550 dev
= __dev_get_by_name(net
, nlrule
->oifname
);
552 nlrule
->oifindex
= dev
->ifindex
;
555 if (tb
[FRA_FWMARK
]) {
556 nlrule
->mark
= nla_get_u32(tb
[FRA_FWMARK
]);
558 /* compatibility: if the mark value is non-zero all bits
559 * are compared unless a mask is explicitly specified.
561 nlrule
->mark_mask
= 0xFFFFFFFF;
565 nlrule
->mark_mask
= nla_get_u32(tb
[FRA_FWMASK
]);
568 nlrule
->tun_id
= nla_get_be64(tb
[FRA_TUN_ID
]);
571 if (tb
[FRA_L3MDEV
] &&
572 fib_nl2rule_l3mdev(tb
[FRA_L3MDEV
], nlrule
, extack
) < 0)
575 nlrule
->action
= frh
->action
;
576 nlrule
->flags
= frh
->flags
;
577 nlrule
->table
= frh_get_table(frh
, tb
);
578 if (tb
[FRA_SUPPRESS_PREFIXLEN
])
579 nlrule
->suppress_prefixlen
= nla_get_u32(tb
[FRA_SUPPRESS_PREFIXLEN
]);
581 nlrule
->suppress_prefixlen
= -1;
583 if (tb
[FRA_SUPPRESS_IFGROUP
])
584 nlrule
->suppress_ifgroup
= nla_get_u32(tb
[FRA_SUPPRESS_IFGROUP
]);
586 nlrule
->suppress_ifgroup
= -1;
589 if (nlrule
->action
!= FR_ACT_GOTO
) {
590 NL_SET_ERR_MSG(extack
, "Unexpected goto");
594 nlrule
->target
= nla_get_u32(tb
[FRA_GOTO
]);
595 /* Backward jumps are prohibited to avoid endless loops */
596 if (nlrule
->target
<= nlrule
->pref
) {
597 NL_SET_ERR_MSG(extack
, "Backward goto not supported");
600 } else if (nlrule
->action
== FR_ACT_GOTO
) {
601 NL_SET_ERR_MSG(extack
, "Missing goto target for action goto");
605 if (nlrule
->l3mdev
&& nlrule
->table
) {
606 NL_SET_ERR_MSG(extack
, "l3mdev and table are mutually exclusive");
610 if (tb
[FRA_UID_RANGE
]) {
611 if (current_user_ns() != net
->user_ns
) {
613 NL_SET_ERR_MSG(extack
, "No permission to set uid");
617 nlrule
->uid_range
= nla_get_kuid_range(tb
);
619 if (!uid_range_set(&nlrule
->uid_range
) ||
620 !uid_lte(nlrule
->uid_range
.start
, nlrule
->uid_range
.end
)) {
621 NL_SET_ERR_MSG(extack
, "Invalid uid range");
625 nlrule
->uid_range
= fib_kuid_range_unset
;
628 if (tb
[FRA_IP_PROTO
])
629 nlrule
->ip_proto
= nla_get_u8(tb
[FRA_IP_PROTO
]);
631 if (tb
[FRA_SPORT_RANGE
]) {
632 err
= nla_get_port_range(tb
[FRA_SPORT_RANGE
],
633 &nlrule
->sport_range
);
635 NL_SET_ERR_MSG(extack
, "Invalid sport range");
640 if (tb
[FRA_DPORT_RANGE
]) {
641 err
= nla_get_port_range(tb
[FRA_DPORT_RANGE
],
642 &nlrule
->dport_range
);
644 NL_SET_ERR_MSG(extack
, "Invalid dport range");
659 static int rule_exists(struct fib_rules_ops
*ops
, struct fib_rule_hdr
*frh
,
660 struct nlattr
**tb
, struct fib_rule
*rule
)
664 list_for_each_entry(r
, &ops
->rules_list
, list
) {
665 if (r
->action
!= rule
->action
)
668 if (r
->table
!= rule
->table
)
671 if (r
->pref
!= rule
->pref
)
674 if (memcmp(r
->iifname
, rule
->iifname
, IFNAMSIZ
))
677 if (memcmp(r
->oifname
, rule
->oifname
, IFNAMSIZ
))
680 if (r
->mark
!= rule
->mark
)
683 if (r
->suppress_ifgroup
!= rule
->suppress_ifgroup
)
686 if (r
->suppress_prefixlen
!= rule
->suppress_prefixlen
)
689 if (r
->mark_mask
!= rule
->mark_mask
)
692 if (r
->tun_id
!= rule
->tun_id
)
695 if (r
->fr_net
!= rule
->fr_net
)
698 if (r
->l3mdev
!= rule
->l3mdev
)
701 if (!uid_eq(r
->uid_range
.start
, rule
->uid_range
.start
) ||
702 !uid_eq(r
->uid_range
.end
, rule
->uid_range
.end
))
705 if (r
->ip_proto
!= rule
->ip_proto
)
708 if (r
->proto
!= rule
->proto
)
711 if (!fib_rule_port_range_compare(&r
->sport_range
,
715 if (!fib_rule_port_range_compare(&r
->dport_range
,
719 if (!ops
->compare(r
, frh
, tb
))
726 int fib_nl_newrule(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
727 struct netlink_ext_ack
*extack
)
729 struct net
*net
= sock_net(skb
->sk
);
730 struct fib_rule_hdr
*frh
= nlmsg_data(nlh
);
731 struct fib_rules_ops
*ops
= NULL
;
732 struct fib_rule
*rule
= NULL
, *r
, *last
= NULL
;
733 struct nlattr
*tb
[FRA_MAX
+ 1];
734 int err
= -EINVAL
, unresolved
= 0;
735 bool user_priority
= false;
737 if (nlh
->nlmsg_len
< nlmsg_msg_size(sizeof(*frh
))) {
738 NL_SET_ERR_MSG(extack
, "Invalid msg length");
742 ops
= lookup_rules_ops(net
, frh
->family
);
745 NL_SET_ERR_MSG(extack
, "Rule family not supported");
749 err
= nlmsg_parse(nlh
, sizeof(*frh
), tb
, FRA_MAX
, ops
->policy
, extack
);
751 NL_SET_ERR_MSG(extack
, "Error parsing msg");
755 err
= fib_nl2rule(skb
, nlh
, extack
, ops
, tb
, &rule
, &user_priority
);
759 if ((nlh
->nlmsg_flags
& NLM_F_EXCL
) &&
760 rule_exists(ops
, frh
, tb
, rule
)) {
765 err
= ops
->configure(rule
, skb
, frh
, tb
, extack
);
769 err
= call_fib_rule_notifiers(net
, FIB_EVENT_RULE_ADD
, rule
, ops
,
774 list_for_each_entry(r
, &ops
->rules_list
, list
) {
775 if (r
->pref
== rule
->target
) {
776 RCU_INIT_POINTER(rule
->ctarget
, r
);
781 if (rcu_dereference_protected(rule
->ctarget
, 1) == NULL
)
784 list_for_each_entry(r
, &ops
->rules_list
, list
) {
785 if (r
->pref
> rule
->pref
)
791 list_add_rcu(&rule
->list
, &last
->list
);
793 list_add_rcu(&rule
->list
, &ops
->rules_list
);
795 if (ops
->unresolved_rules
) {
797 * There are unresolved goto rules in the list, check if
798 * any of them are pointing to this new rule.
800 list_for_each_entry(r
, &ops
->rules_list
, list
) {
801 if (r
->action
== FR_ACT_GOTO
&&
802 r
->target
== rule
->pref
&&
803 rtnl_dereference(r
->ctarget
) == NULL
) {
804 rcu_assign_pointer(r
->ctarget
, rule
);
805 if (--ops
->unresolved_rules
== 0)
811 if (rule
->action
== FR_ACT_GOTO
)
812 ops
->nr_goto_rules
++;
815 ops
->unresolved_rules
++;
818 ip_tunnel_need_metadata();
820 notify_rule_change(RTM_NEWRULE
, rule
, ops
, nlh
, NETLINK_CB(skb
).portid
);
821 flush_route_cache(ops
);
831 EXPORT_SYMBOL_GPL(fib_nl_newrule
);
833 int fib_nl_delrule(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
834 struct netlink_ext_ack
*extack
)
836 struct net
*net
= sock_net(skb
->sk
);
837 struct fib_rule_hdr
*frh
= nlmsg_data(nlh
);
838 struct fib_rules_ops
*ops
= NULL
;
839 struct fib_rule
*rule
= NULL
, *r
, *nlrule
= NULL
;
840 struct nlattr
*tb
[FRA_MAX
+1];
842 bool user_priority
= false;
844 if (nlh
->nlmsg_len
< nlmsg_msg_size(sizeof(*frh
))) {
845 NL_SET_ERR_MSG(extack
, "Invalid msg length");
849 ops
= lookup_rules_ops(net
, frh
->family
);
852 NL_SET_ERR_MSG(extack
, "Rule family not supported");
856 err
= nlmsg_parse(nlh
, sizeof(*frh
), tb
, FRA_MAX
, ops
->policy
, extack
);
858 NL_SET_ERR_MSG(extack
, "Error parsing msg");
862 err
= fib_nl2rule(skb
, nlh
, extack
, ops
, tb
, &nlrule
, &user_priority
);
866 rule
= rule_find(ops
, frh
, tb
, nlrule
, user_priority
);
872 if (rule
->flags
& FIB_RULE_PERMANENT
) {
878 err
= ops
->delete(rule
);
884 ip_tunnel_unneed_metadata();
886 list_del_rcu(&rule
->list
);
888 if (rule
->action
== FR_ACT_GOTO
) {
889 ops
->nr_goto_rules
--;
890 if (rtnl_dereference(rule
->ctarget
) == NULL
)
891 ops
->unresolved_rules
--;
895 * Check if this rule is a target to any of them. If so,
896 * adjust to the next one with the same preference or
897 * disable them. As this operation is eventually very
898 * expensive, it is only performed if goto rules, except
899 * current if it is goto rule, have actually been added.
901 if (ops
->nr_goto_rules
> 0) {
904 n
= list_next_entry(rule
, list
);
905 if (&n
->list
== &ops
->rules_list
|| n
->pref
!= rule
->pref
)
907 list_for_each_entry(r
, &ops
->rules_list
, list
) {
908 if (rtnl_dereference(r
->ctarget
) != rule
)
910 rcu_assign_pointer(r
->ctarget
, n
);
912 ops
->unresolved_rules
++;
916 call_fib_rule_notifiers(net
, FIB_EVENT_RULE_DEL
, rule
, ops
,
918 notify_rule_change(RTM_DELRULE
, rule
, ops
, nlh
,
919 NETLINK_CB(skb
).portid
);
921 flush_route_cache(ops
);
931 EXPORT_SYMBOL_GPL(fib_nl_delrule
);
933 static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops
*ops
,
934 struct fib_rule
*rule
)
936 size_t payload
= NLMSG_ALIGN(sizeof(struct fib_rule_hdr
))
937 + nla_total_size(IFNAMSIZ
) /* FRA_IIFNAME */
938 + nla_total_size(IFNAMSIZ
) /* FRA_OIFNAME */
939 + nla_total_size(4) /* FRA_PRIORITY */
940 + nla_total_size(4) /* FRA_TABLE */
941 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
942 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
943 + nla_total_size(4) /* FRA_FWMARK */
944 + nla_total_size(4) /* FRA_FWMASK */
945 + nla_total_size_64bit(8) /* FRA_TUN_ID */
946 + nla_total_size(sizeof(struct fib_kuid_range
))
947 + nla_total_size(1) /* FRA_PROTOCOL */
948 + nla_total_size(1) /* FRA_IP_PROTO */
949 + nla_total_size(sizeof(struct fib_rule_port_range
)) /* FRA_SPORT_RANGE */
950 + nla_total_size(sizeof(struct fib_rule_port_range
)); /* FRA_DPORT_RANGE */
952 if (ops
->nlmsg_payload
)
953 payload
+= ops
->nlmsg_payload(rule
);
958 static int fib_nl_fill_rule(struct sk_buff
*skb
, struct fib_rule
*rule
,
959 u32 pid
, u32 seq
, int type
, int flags
,
960 struct fib_rules_ops
*ops
)
962 struct nlmsghdr
*nlh
;
963 struct fib_rule_hdr
*frh
;
965 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*frh
), flags
);
969 frh
= nlmsg_data(nlh
);
970 frh
->family
= ops
->family
;
971 frh
->table
= rule
->table
;
972 if (nla_put_u32(skb
, FRA_TABLE
, rule
->table
))
973 goto nla_put_failure
;
974 if (nla_put_u32(skb
, FRA_SUPPRESS_PREFIXLEN
, rule
->suppress_prefixlen
))
975 goto nla_put_failure
;
978 frh
->action
= rule
->action
;
979 frh
->flags
= rule
->flags
;
981 if (nla_put_u8(skb
, FRA_PROTOCOL
, rule
->proto
))
982 goto nla_put_failure
;
984 if (rule
->action
== FR_ACT_GOTO
&&
985 rcu_access_pointer(rule
->ctarget
) == NULL
)
986 frh
->flags
|= FIB_RULE_UNRESOLVED
;
988 if (rule
->iifname
[0]) {
989 if (nla_put_string(skb
, FRA_IIFNAME
, rule
->iifname
))
990 goto nla_put_failure
;
991 if (rule
->iifindex
== -1)
992 frh
->flags
|= FIB_RULE_IIF_DETACHED
;
995 if (rule
->oifname
[0]) {
996 if (nla_put_string(skb
, FRA_OIFNAME
, rule
->oifname
))
997 goto nla_put_failure
;
998 if (rule
->oifindex
== -1)
999 frh
->flags
|= FIB_RULE_OIF_DETACHED
;
1003 nla_put_u32(skb
, FRA_PRIORITY
, rule
->pref
)) ||
1005 nla_put_u32(skb
, FRA_FWMARK
, rule
->mark
)) ||
1006 ((rule
->mark_mask
|| rule
->mark
) &&
1007 nla_put_u32(skb
, FRA_FWMASK
, rule
->mark_mask
)) ||
1009 nla_put_u32(skb
, FRA_GOTO
, rule
->target
)) ||
1011 nla_put_be64(skb
, FRA_TUN_ID
, rule
->tun_id
, FRA_PAD
)) ||
1013 nla_put_u8(skb
, FRA_L3MDEV
, rule
->l3mdev
)) ||
1014 (uid_range_set(&rule
->uid_range
) &&
1015 nla_put_uid_range(skb
, &rule
->uid_range
)) ||
1016 (fib_rule_port_range_set(&rule
->sport_range
) &&
1017 nla_put_port_range(skb
, FRA_SPORT_RANGE
, &rule
->sport_range
)) ||
1018 (fib_rule_port_range_set(&rule
->dport_range
) &&
1019 nla_put_port_range(skb
, FRA_DPORT_RANGE
, &rule
->dport_range
)) ||
1020 (rule
->ip_proto
&& nla_put_u8(skb
, FRA_IP_PROTO
, rule
->ip_proto
)))
1021 goto nla_put_failure
;
1023 if (rule
->suppress_ifgroup
!= -1) {
1024 if (nla_put_u32(skb
, FRA_SUPPRESS_IFGROUP
, rule
->suppress_ifgroup
))
1025 goto nla_put_failure
;
1028 if (ops
->fill(rule
, skb
, frh
) < 0)
1029 goto nla_put_failure
;
1031 nlmsg_end(skb
, nlh
);
1035 nlmsg_cancel(skb
, nlh
);
1039 static int dump_rules(struct sk_buff
*skb
, struct netlink_callback
*cb
,
1040 struct fib_rules_ops
*ops
)
1043 struct fib_rule
*rule
;
1047 list_for_each_entry_rcu(rule
, &ops
->rules_list
, list
) {
1048 if (idx
< cb
->args
[1])
1051 err
= fib_nl_fill_rule(skb
, rule
, NETLINK_CB(cb
->skb
).portid
,
1052 cb
->nlh
->nlmsg_seq
, RTM_NEWRULE
,
1066 static int fib_valid_dumprule_req(const struct nlmsghdr
*nlh
,
1067 struct netlink_ext_ack
*extack
)
1069 struct fib_rule_hdr
*frh
;
1071 if (nlh
->nlmsg_len
< nlmsg_msg_size(sizeof(*frh
))) {
1072 NL_SET_ERR_MSG(extack
, "Invalid header for fib rule dump request");
1076 frh
= nlmsg_data(nlh
);
1077 if (frh
->dst_len
|| frh
->src_len
|| frh
->tos
|| frh
->table
||
1078 frh
->res1
|| frh
->res2
|| frh
->action
|| frh
->flags
) {
1079 NL_SET_ERR_MSG(extack
,
1080 "Invalid values in header for fib rule dump request");
1084 if (nlmsg_attrlen(nlh
, sizeof(*frh
))) {
1085 NL_SET_ERR_MSG(extack
, "Invalid data after header in fib rule dump request");
1092 static int fib_nl_dumprule(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1094 const struct nlmsghdr
*nlh
= cb
->nlh
;
1095 struct net
*net
= sock_net(skb
->sk
);
1096 struct fib_rules_ops
*ops
;
1097 int idx
= 0, family
;
1099 if (cb
->strict_check
) {
1100 int err
= fib_valid_dumprule_req(nlh
, cb
->extack
);
1106 family
= rtnl_msg_family(nlh
);
1107 if (family
!= AF_UNSPEC
) {
1108 /* Protocol specific dump request */
1109 ops
= lookup_rules_ops(net
, family
);
1111 return -EAFNOSUPPORT
;
1113 dump_rules(skb
, cb
, ops
);
1119 list_for_each_entry_rcu(ops
, &net
->rules_ops
, list
) {
1120 if (idx
< cb
->args
[0] || !try_module_get(ops
->owner
))
1123 if (dump_rules(skb
, cb
, ops
) < 0)
1136 static void notify_rule_change(int event
, struct fib_rule
*rule
,
1137 struct fib_rules_ops
*ops
, struct nlmsghdr
*nlh
,
1141 struct sk_buff
*skb
;
1145 skb
= nlmsg_new(fib_rule_nlmsg_size(ops
, rule
), GFP_KERNEL
);
1149 err
= fib_nl_fill_rule(skb
, rule
, pid
, nlh
->nlmsg_seq
, event
, 0, ops
);
1151 /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
1152 WARN_ON(err
== -EMSGSIZE
);
1157 rtnl_notify(skb
, net
, pid
, ops
->nlgroup
, nlh
, GFP_KERNEL
);
1161 rtnl_set_sk_err(net
, ops
->nlgroup
, err
);
1164 static void attach_rules(struct list_head
*rules
, struct net_device
*dev
)
1166 struct fib_rule
*rule
;
1168 list_for_each_entry(rule
, rules
, list
) {
1169 if (rule
->iifindex
== -1 &&
1170 strcmp(dev
->name
, rule
->iifname
) == 0)
1171 rule
->iifindex
= dev
->ifindex
;
1172 if (rule
->oifindex
== -1 &&
1173 strcmp(dev
->name
, rule
->oifname
) == 0)
1174 rule
->oifindex
= dev
->ifindex
;
1178 static void detach_rules(struct list_head
*rules
, struct net_device
*dev
)
1180 struct fib_rule
*rule
;
1182 list_for_each_entry(rule
, rules
, list
) {
1183 if (rule
->iifindex
== dev
->ifindex
)
1184 rule
->iifindex
= -1;
1185 if (rule
->oifindex
== dev
->ifindex
)
1186 rule
->oifindex
= -1;
1191 static int fib_rules_event(struct notifier_block
*this, unsigned long event
,
1194 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
1195 struct net
*net
= dev_net(dev
);
1196 struct fib_rules_ops
*ops
;
1201 case NETDEV_REGISTER
:
1202 list_for_each_entry(ops
, &net
->rules_ops
, list
)
1203 attach_rules(&ops
->rules_list
, dev
);
1206 case NETDEV_CHANGENAME
:
1207 list_for_each_entry(ops
, &net
->rules_ops
, list
) {
1208 detach_rules(&ops
->rules_list
, dev
);
1209 attach_rules(&ops
->rules_list
, dev
);
1213 case NETDEV_UNREGISTER
:
1214 list_for_each_entry(ops
, &net
->rules_ops
, list
)
1215 detach_rules(&ops
->rules_list
, dev
);
1222 static struct notifier_block fib_rules_notifier
= {
1223 .notifier_call
= fib_rules_event
,
1226 static int __net_init
fib_rules_net_init(struct net
*net
)
1228 INIT_LIST_HEAD(&net
->rules_ops
);
1229 spin_lock_init(&net
->rules_mod_lock
);
1233 static void __net_exit
fib_rules_net_exit(struct net
*net
)
1235 WARN_ON_ONCE(!list_empty(&net
->rules_ops
));
1238 static struct pernet_operations fib_rules_net_ops
= {
1239 .init
= fib_rules_net_init
,
1240 .exit
= fib_rules_net_exit
,
1243 static int __init
fib_rules_init(void)
1246 rtnl_register(PF_UNSPEC
, RTM_NEWRULE
, fib_nl_newrule
, NULL
, 0);
1247 rtnl_register(PF_UNSPEC
, RTM_DELRULE
, fib_nl_delrule
, NULL
, 0);
1248 rtnl_register(PF_UNSPEC
, RTM_GETRULE
, NULL
, fib_nl_dumprule
, 0);
1250 err
= register_pernet_subsys(&fib_rules_net_ops
);
1254 err
= register_netdevice_notifier(&fib_rules_notifier
);
1256 goto fail_unregister
;
1261 unregister_pernet_subsys(&fib_rules_net_ops
);
1263 rtnl_unregister(PF_UNSPEC
, RTM_NEWRULE
);
1264 rtnl_unregister(PF_UNSPEC
, RTM_DELRULE
);
1265 rtnl_unregister(PF_UNSPEC
, RTM_GETRULE
);
1269 subsys_initcall(fib_rules_init
);