/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>

static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};

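/* Return true if the rule carries no match conditions at all, i.e. it
 * matches every flow. Callers use this to recognise "catch-all" rules.
 */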
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
	    rule->flags)
		return false;
	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;
	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(fib_rule_matchall);

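/* Install one default rule at init time, before the ops are visible to
 * lookups (hence no locking). A family typically seeds its list with a
 * few of these; a sketch in the IPv4 style (preferences and table ids
 * assumed):
 *
 *	fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *	fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 *	fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 */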
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	refcount_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

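/* Pick a default preference for a new rule: one below the preference of
 * the second rule in the list, so rules added without an explicit
 * priority slot in right after the first (typically pref 0) rule.
 */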
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

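/* Duplicate a static template and register the copy for one namespace.
 * Callers must check the result with IS_ERR(); a sketch (template name
 * assumed):
 *
 *	ops = fib_rules_register(&fib4_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */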
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

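/* Check a single rule against a flow. All configured selectors must
 * match; FIB_RULE_INVERT flips the final result ("not" rules). The
 * family-specific ops->match() gets the last word.
 */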
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

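/* Walk the sorted rule list under RCU until one rule's action gives a
 * definitive answer (anything but -EAGAIN). FR_ACT_GOTO jumps forward
 * to its resolved target, FR_ACT_NOP is skipped, and a matching
 * suppressor (ops->suppress) cancels the result and continues the walk.
 * Unless FIB_LOOKUP_NOREF is set, the returned rule carries a reference
 * that the caller must drop with fib_rule_put().
 */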
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(refcount_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net,
				  enum fib_event_type event_type,
				  struct fib_rule *rule, int family)
{
	struct fib_rule_notifier_info info = {
		.info.family = family,
		.rule = rule,
	};

	return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_fib_rule_notifiers(struct net *net,
				   enum fib_event_type event_type,
				   struct fib_rule *rule,
				   struct fib_rules_ops *ops,
				   struct netlink_ext_ack *extack)
{
	struct fib_rule_notifier_info info = {
		.info.family = ops->family,
		.info.extack = extack,
		.rule = rule,
	};

	ops->fib_rules_seq++;
	return call_fib_notifiers(net, event_type, &info.info);
}

/* Called with rcu_read_lock() */
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family)
{
	struct fib_rules_ops *ops;
	struct fib_rule *rule;

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return -EAFNOSUPPORT;
	list_for_each_entry_rcu(rule, &ops->rules_list, list)
		call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule,
				       family);
	rules_ops_put(ops);

	return 0;
}
EXPORT_SYMBOL_GPL(fib_rules_dump);

unsigned int fib_rules_seq_read(struct net *net, int family)
{
	unsigned int fib_rules_seq;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	ops = lookup_rules_ops(net, family);
	if (!ops)
		return 0;
	fib_rules_seq = ops->fib_rules_seq;
	rules_ops_put(ops);

	return fib_rules_seq;
}
EXPORT_SYMBOL_GPL(fib_rules_seq_read);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

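/* Used by fib_nl_newrule() with NLM_F_EXCL to detect a duplicate: two
 * rules are considered the same when every generic selector and the
 * family-specific part (ops->compare) agree.
 */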
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;

		return 1;
	}
	return 0;
}

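/* RTM_NEWRULE handler: parse the netlink request, build and validate a
 * new rule, insert it in preference order and resolve pending goto
 * targets. This is where e.g. "ip rule add fwmark 0x10 table 100"
 * (a sketch of a typical iproute2 invocation) ends up.
 */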
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	refcount_set(&rule->refcnt, 1);
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	err = -EINVAL;
	if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
		rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		if (rule->l3mdev != 1)
#endif
			goto errout_free;
	}

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	if (rule->l3mdev && rule->table)
		goto errout_free;

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			goto errout_free;
		}

		rule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&rule->uid_range) ||
		    !uid_lte(rule->uid_range.start, rule->uid_range.end))
			goto errout_free;
	} else {
		rule->uid_range = fib_kuid_range_unset;
	}

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops, extack);
	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);

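/* RTM_DELRULE handler: find the first rule matching every attribute
 * present in the request and remove it, repointing or unresolving any
 * goto rules that targeted it.
 */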
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
		   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r;
	struct nlattr *tb[FRA_MAX+1];
	struct fib_kuid_range range;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, extack);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	if (tb[FRA_UID_RANGE]) {
		range = nla_get_kuid_range(tb);
		if (!uid_range_set(&range)) {
			err = -EINVAL;
			goto errout;
		}
	} else {
		range = fib_kuid_range_unset;
	}

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (tb[FRA_L3MDEV] &&
		    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
			continue;

		if (uid_range_set(&range) &&
		    (!uid_eq(rule->uid_range.start, range.start) ||
		     !uid_eq(rule->uid_range.end, range.end)))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target of any goto rules. If so,
		 * adjust those rules to the next rule with the same
		 * preference or mark them unresolved. As this operation is
		 * potentially very expensive, it is only performed if goto
		 * rules (other than the rule being deleted, if it is one)
		 * have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			struct fib_rule *n;

			n = list_next_entry(rule, list);
			if (&n->list == &ops->rules_list || n->pref != rule->pref)
				n = NULL;
			list_for_each_entry(r, &ops->rules_list, list) {
				if (rtnl_dereference(r->ctarget) != rule)
					continue;
				rcu_assign_pointer(r->ctarget, n);
				if (!n)
					ops->unresolved_rules++;
			}
		}

		call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops,
					NULL);
		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);

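/* Upper bound on the netlink message size for one rule; used to size
 * the notification skb so that fib_nl_fill_rule() cannot overflow it.
 */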
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range));

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

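/* Dump one family's rules into a netlink dump reply, resuming from
 * cb->args[1] on multi-part dumps.
 */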
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

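/* Rules reference devices by name; an ifindex of -1 marks a rule as
 * detached. These helpers re-bind or unbind rules as devices come, go,
 * or are renamed.
 */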
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static void __net_exit fib_rules_net_exit(struct net *net)
{
	WARN_ON_ONCE(!list_empty(&net->rules_ops));
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
	.exit = fib_rules_net_exit,
};

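/* Wire up the generic RTM_{NEW,DEL,GET}RULE handlers, the pernet state
 * and the netdevice notifier; unwind in reverse order on failure.
 */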
static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);