/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = ops->fro_net;
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called.
	 */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
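/* Pick a default preference for a new rule when userspace did not supply
 * FRA_PRIORITY: one less than the preference of the second rule in the
 * list, or 0 when the list is empty or holds a single entry.
 */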
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}
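/* Link a new ops structure into the per-namespace list. The ops must
 * provide at least the match, configure, compare, fill and action
 * callbacks and declare a rule_size large enough to hold a struct
 * fib_rule; only one ops per address family may be registered in a
 * namespace.
 */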
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}
static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}
static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}
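/* Return 1 if @rule matches the flow @fl: the input/output interface,
 * fwmark (under the rule's mask), tunnel id, l3mdev binding and uid range
 * are checked first, then the family specific ops->match() callback.
 * FIB_RULE_INVERT flips the final result.
 */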
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags,
			  struct fib_lookup_arg *arg)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
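/* Walk the rules list under RCU and run the action of the first matching
 * rule. FR_ACT_GOTO jumps to its (already resolved) target rule,
 * FR_ACT_NOP is skipped, and an action returning -EAGAIN lets the walk
 * continue with the next rule. On success arg->rule is set (taking a
 * reference unless FIB_LOOKUP_NOREF is passed); -ESRCH means no rule
 * matched.
 */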
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags, arg))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
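/* Used for the NLM_F_EXCL check in fib_nl_newrule(): return 1 if a rule
 * identical to @rule (same action, table, preference, interfaces, marks,
 * tunnel id, l3mdev, uid range and family specific selectors) is already
 * in the list.
 */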
static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
		       struct nlattr **tb, struct fib_rule *rule)
{
	struct fib_rule *r;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->action != rule->action)
			continue;

		if (r->table != rule->table)
			continue;

		if (r->pref != rule->pref)
			continue;

		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
			continue;

		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
			continue;

		if (r->mark != rule->mark)
			continue;

		if (r->mark_mask != rule->mark_mask)
			continue;

		if (r->tun_id != rule->tun_id)
			continue;

		if (r->fr_net != rule->fr_net)
			continue;

		if (r->l3mdev != rule->l3mdev)
			continue;

		if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
		    !uid_eq(r->uid_range.end, rule->uid_range.end))
			continue;

		if (!ops->compare(r, frh, tb))
			continue;
		return 1;
	}
	return 0;
}
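/* RTM_NEWRULE handler: parse and validate the netlink request, allocate
 * and fill a new rule, resolve or record FR_ACT_GOTO targets, insert the
 * rule into the list ordered by preference, and notify listeners.
 */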
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	if (tb[FRA_L3MDEV]) {
#ifdef CONFIG_NET_L3_MASTER_DEV
		rule->l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		if (rule->l3mdev != 1)
#endif
			goto errout_free;
	}

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	if (rule->l3mdev && rule->table)
		goto errout_free;

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			goto errout_free;
		}

		rule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&rule->uid_range) ||
		    !uid_lte(rule->uid_range.start, rule->uid_range.end))
			goto errout_free;
	} else {
		rule->uid_range = fib_kuid_range_unset;
	}

	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
	    rule_exists(ops, frh, tb, rule)) {
		err = -EEXIST;
		goto errout_free;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_newrule);
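/* RTM_DELRULE handler: find the first rule matching every selector given
 * in the request, unlink it from the list, fix up goto rules that pointed
 * at it, and notify listeners. Rules marked FIB_RULE_PERMANENT cannot be
 * deleted.
 */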
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	struct fib_kuid_range range;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	if (tb[FRA_UID_RANGE]) {
		range = nla_get_kuid_range(tb);
		if (!uid_range_set(&range))
			goto errout;
	} else {
		range = fib_kuid_range_unset;
	}

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (tb[FRA_L3MDEV] &&
		    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
			continue;

		if (uid_range_set(&range) &&
		    (!uid_eq(rule->uid_range.start, range.start) ||
		     !uid_eq(rule->uid_range.end, range.end)))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target of any goto rule. If so,
		 * disable those rules. As this operation is potentially very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
EXPORT_SYMBOL_GPL(fib_nl_delrule);
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
			 + nla_total_size(sizeof(struct fib_kuid_range));

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
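/* Fill @skb with one RTM_NEWRULE/RTM_DELRULE message describing @rule.
 * Returns 0 on success or -EMSGSIZE if the message does not fit, in which
 * case the partially written message is cancelled.
 */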
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
	    (rule->l3mdev &&
	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
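/* Dump all rules of one ops into an RTM_GETRULE response, continuing from
 * the index saved in cb->args[1] on the previous pass.
 */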
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}
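/* Device notifier helpers: bind rules whose iif/oif name matches a newly
 * registered device to its ifindex, and mark them detached (-1) again
 * when the device goes away.
 */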
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};
static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);