/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

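/* Generic policy routing rule management.  Each address family
 * registers a struct fib_rules_ops describing how its rules are
 * matched, configured, compared and dumped; this file provides the
 * shared rule list handling and the RTM_NEWRULE/RTM_DELRULE/
 * RTM_GETRULE netlink interface on top of it.
 */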
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called.
	 */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

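/* Returns a preference one below that of the rule currently second in
 * the list, so that a rule created with this preference is inserted
 * between the first rule and the rest of the list.
 */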
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

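/* Look up the ops for a family under RCU, taking a module reference
 * so the ops cannot be unloaded while a request is being processed;
 * the reference is dropped again via rules_ops_put().
 */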
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = hold_net(net);

	err = __fib_rules_register(ops);
	if (err) {
		release_net(net);
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

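/* Registration sketch (modelled on net/ipv4/fib_rules.c; the template
 * shown is illustrative and its fields differ between kernel versions):
 *
 *	static const struct fib_rules_ops fib4_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib4_rule),
 *		.addr_size	= sizeof(u32),
 *		.match		= fib4_rule_match,
 *		.configure	= fib4_rule_configure,
 *		...
 *	};
 *
 *	ops = fib_rules_register(&fib4_rules_ops_template, net);
 */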
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

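/* Generic selectors checked here: incoming/outgoing interface and
 * fwmark/mask; everything else (src/dst addresses, tos, ...) is the
 * family's job via ops->match().  E.g. a rule with mark 0x1/0xff
 * matches a flow with flowi_mark 0x101 but not one with 0x2.
 * FIB_RULE_INVERT flips the final result ("not" rules).
 */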
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

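/* Walk the rules in preference order and run the first matching
 * rule's action: FR_ACT_GOTO jumps forward to its resolved target,
 * FR_ACT_NOP is skipped, and an action returning -EAGAIN lets the
 * walk continue with the next rule.
 */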
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

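/* Caller sketch (modelled on fib4_rules_lookup() in
 * net/ipv4/fib_rules.c; details are illustrative):
 *
 *	struct fib_lookup_arg arg = { .result = res, .flags = flags };
 *
 *	err = fib_rules_lookup(net->ipv4.rules_ops,
 *			       flowi4_to_flowi(flp), 0, &arg);
 */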
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

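/* RTM_NEWRULE handler: parse and validate the request, allocate and
 * fill the rule, resolve a possible goto target, let the family
 * configure its part, and insert the rule sorted by preference.
 */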
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
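	/* e.g. FRA_FWMARK 0x1 without FRA_FWMASK thus behaves as mark/mask
	 * 0x1/0xFFFFFFFF: only flows whose mark is exactly 0x1 match.
	 */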

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

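/* RTM_DELRULE handler: delete the first rule that matches every
 * attribute present in the request, and invalidate any goto rules
 * whose target it was.
 */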
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target to any of them. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

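/* Upper bound for the notification/dump message size of one rule;
 * family specific attributes are added via ops->nlmsg_payload().
 */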
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

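/* Fill one rule into a netlink message; shared by dumps and by
 * RTM_NEWRULE/RTM_DELRULE notifications.
 */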
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

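/* Dump the rules of one ops; cb->args[1] carries the index to resume
 * from when the dump spans multiple netlink messages.
 */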
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

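/* Rules reference interfaces by name, so they survive the device:
 * attach_rules() binds matching rules to the ifindex of a (re)appearing
 * device, detach_rules() marks them detached (ifindex -1) again.
 */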
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);