/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>
static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

	if (!tp)
		return -EINVAL;
	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
	if (!a->goto_chain)
		return -ENOMEM;
	return 0;
}
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put(a->goto_chain);
}
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers can no longer find us.
 */
static void free_tcf(struct tc_action *p)
{
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);

	if (p->act_cookie) {
		kfree(p->act_cookie->data);
		kfree(p->act_cookie);
	}
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}
static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
	spin_lock_bh(&idrinfo->lock);
	idr_remove(&idrinfo->action_idr, p->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfa_bindcnt--;
		else if (strict && p->tcfa_bindcnt > 0)
			return -EPERM;

		p->tcfa_refcnt--;
		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
			if (p->ops->cleanup)
				p->ops->cleanup(p, bind);
			tcf_idr_remove(p->idrinfo, p);
			ret = ACT_P_DELETED;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
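
/* Example: the flush paths below call __tcf_idr_release(p, false, true).
 * A sketch of the semantics as implemented above: with strict set, an
 * action whose bind count is still positive makes the release fail with
 * -EPERM instead of silently staying alive; only once both the bind count
 * and the reference count have dropped to zero is the action removed and
 * ACT_P_DELETED returned.
 */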
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	spin_lock_bh(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	spin_unlock_bh(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
{
	struct tc_action *p = NULL;

	spin_lock_bh(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	spin_unlock_bh(&idrinfo->lock);

	return p;
}
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (p) {
		*a = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_idr_search);
bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
		   int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (index && p) {
		if (bind)
			p->tcfa_bindcnt++;
		p->tcfa_refcnt++;
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_check);
void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est)
{
	if (est)
		gen_kill_estimator(&a->tcfa_rate_est);
	free_tcf(a);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct idr *idr = &idrinfo->action_idr;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfa_refcnt = 1;
	if (bind)
		p->tcfa_bindcnt = 1;

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err2;
	}
	spin_lock_init(&p->tcfa_lock);
	idr_preload(GFP_KERNEL);
	spin_lock_bh(&idrinfo->lock);
	/* user doesn't specify an index */
	if (!index) {
		index = 1;
		err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
	} else {
		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
	}
	spin_unlock_bh(&idrinfo->lock);
	idr_preload_end();
	if (err)
		goto err3;

	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	INIT_LIST_HEAD(&p->list);
	*a = p;
	return 0;
err4:
	idr_remove(idr, index);
err3:
	free_percpu(p->cpu_qstats);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);
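
/* Example (illustrative sketch only, not part of this file): the typical
 * ->init() flow of an action module against the idr helpers above and
 * tcf_idr_insert() below. "parm" and "act_example_ops" are hypothetical
 * names.
 *
 *	if (tcf_idr_check(tn, parm->index, a, bind))
 *		return 0;	// existing action was found (and bound)
 *	err = tcf_idr_create(tn, parm->index, est, a,
 *			     &act_example_ops, bind, false);
 *	if (err)
 *		return err;
 *	// ... set up the action's private state on *a ...
 *	tcf_idr_insert(tn, *a);
 *	return ACT_P_CREATED;
 */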
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	spin_lock_bh(&idrinfo->lock);
	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * net namespace.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);
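
/* Example (illustrative sketch only): an action module typically pairs
 * tcf_register_action() with its pernet ops from module init/exit, so the
 * per-netns state exists before the ops become visible.
 * "act_example_ops" and "example_net_ops" are hypothetical names.
 *
 *	static int __init example_init_module(void)
 *	{
 *		return tcf_register_action(&act_example_ops, &example_net_ops);
 *	}
 *
 *	static void __exit example_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_example_ops, &example_net_ops);
 *	}
 */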
int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
/* TCA_ACT_MAX_PRIO is 32, so the jump count goes up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			}
			jmp_ttl -= 1;
			if (jmp_ttl > 0)
				goto restart_act_graph;
			else /* faulty graph, stop pipeline */
				return TC_ACT_OK;
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
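
/* Worked example for the jump handling above: a verdict of
 * (TC_ACT_JUMP | 2) loads jmp_prgcnt = 2, so the loop skips the next two
 * actions before resuming the pipeline; an encoded count of 0, or one
 * greater than nr_actions, is treated as a faulty opcode and stops the
 * pipeline with TC_ACT_OK.
 */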
int tcf_action_destroy(struct list_head *actions, int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (a->act_cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
			    a->act_cookie->data))
			goto nla_put_failure;
	}

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);
int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}
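
/* Example: the cookie is an opaque, user-supplied blob of up to
 * TC_COOKIE_MAX_SIZE bytes; the kernel never interprets it, it is only
 * stored on the action and echoed back via TCA_ACT_COOKIE in
 * tcf_action_dump_1() above.
 */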
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
		if (tb[TCA_ACT_COOKIE]) {
			int cklen = nla_len(tb[TCA_ACT_COOKIE]);

			if (cklen > TC_COOKIE_MAX_SIZE)
				goto err_out;

			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				err = -ENOMEM;
				goto err_out;
			}
		}
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind);
	if (err < 0)
		goto err_mod;

	if (name == NULL && tb[TCA_ACT_COOKIE]) {
		if (a->act_cookie) {
			kfree(a->act_cookie->data);
			kfree(a->act_cookie);
		}
		a->act_cookie = cookie;
	}

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
		err = tcf_action_goto_chain_init(a, tp);
		if (err) {
			LIST_HEAD(actions);

			list_add_tail(&a->list, &actions);
			tcf_action_destroy(&actions, bind);
			return ERR_PTR(err);
		}
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}
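
/* Note on the -EAGAIN convention above: a sketch of how the top-level
 * request handler replays the request after a successful module load
 * (see the "replay:" label in tc_ctl_action() below):
 *
 *	ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
 *	if (ret == -EAGAIN)
 *		goto replay;
 */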
static void cleanup_a(struct list_head *actions, int ovr)
{
	struct tc_action *a;

	if (!ovr)
		return;

	list_for_each_entry(a, actions, list)
		a->tcfa_refcnt--;
}
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct list_head *actions)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, actions);
	}

	/* Remove the temp refcnt which was necessary to protect against
	 * destroying an existing action which was being replaced.
	 */
	cleanup_a(actions, ovr);
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else {
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);
	}

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}
static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) /* could happen in batch of actions */
		goto err_out;
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0)
		goto err_mod;

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
*net
, struct nlattr
*nla
,
896 struct nlmsghdr
*n
, u32 portid
)
900 struct nlmsghdr
*nlh
;
902 struct netlink_callback dcb
;
904 struct nlattr
*tb
[TCA_ACT_MAX
+ 1];
905 const struct tc_action_ops
*ops
;
909 skb
= alloc_skb(NLMSG_GOODSIZE
, GFP_KERNEL
);
911 pr_debug("tca_action_flush: failed skb alloc\n");
915 b
= skb_tail_pointer(skb
);
917 err
= nla_parse_nested(tb
, TCA_ACT_MAX
, nla
, NULL
, NULL
);
922 kind
= tb
[TCA_ACT_KIND
];
923 ops
= tc_lookup_action(kind
);
924 if (!ops
) /*some idjot trying to flush unknown action */
927 nlh
= nlmsg_put(skb
, portid
, n
->nlmsg_seq
, RTM_DELACTION
,
932 t
->tca_family
= AF_UNSPEC
;
936 nest
= nla_nest_start(skb
, TCA_ACT_TAB
);
940 err
= ops
->walk(net
, skb
, &dcb
, RTM_DELACTION
, ops
);
944 nla_nest_end(skb
, nest
);
946 nlh
->nlmsg_len
= skb_tail_pointer(skb
) - b
;
947 nlh
->nlmsg_flags
|= NLM_F_ROOT
;
948 module_put(ops
->owner
);
949 err
= rtnetlink_send(skb
, net
, portid
, RTNLGRP_TC
,
950 n
->nlmsg_flags
& NLM_F_ECHO
);
957 module_put(ops
->owner
);
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, &actions, event);
	else /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
err:
	if (event != RTM_GETACTION)
		tcf_action_destroy(&actions, 0);
	return ret;
}
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}
static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr)
{
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions);
	if (ret)
		return ret;

	return tcf_add_notify(net, n, &actions, portid);
}
static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
			     .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
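
/* Example: a dump request carrying TCA_ROOT_FLAGS with the
 * TCA_FLAG_LARGE_DUMP_ON bit set (in both the value and the selector of
 * the NLA_BITFIELD32) asks tcf_dump_walker() to keep going past the usual
 * TCA_ACT_MAX_PRIO cap per message; any other flag bit is rejected via
 * tcaa_root_flags_allowed above.
 */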
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
			  extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist.
		 * Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (e.g. when flags
		 * is zero) we just set this.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}
*find_dump_kind(struct nlattr
**nla
)
1137 struct nlattr
*tb1
, *tb2
[TCA_ACT_MAX
+ 1];
1138 struct nlattr
*tb
[TCA_ACT_MAX_PRIO
+ 1];
1139 struct nlattr
*kind
;
1141 tb1
= nla
[TCA_ACT_TAB
];
1145 if (nla_parse(tb
, TCA_ACT_MAX_PRIO
, nla_data(tb1
),
1146 NLMSG_ALIGN(nla_len(tb1
)), NULL
, NULL
) < 0)
1151 if (nla_parse_nested(tb2
, TCA_ACT_MAX
, tb
[1], NULL
, NULL
) < 0)
1153 kind
= tb2
[TCA_ACT_KIND
];
1158 static int tc_dump_action(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1160 struct net
*net
= sock_net(skb
->sk
);
1161 struct nlmsghdr
*nlh
;
1162 unsigned char *b
= skb_tail_pointer(skb
);
1163 struct nlattr
*nest
;
1164 struct tc_action_ops
*a_o
;
1166 struct tcamsg
*t
= (struct tcamsg
*) nlmsg_data(cb
->nlh
);
1167 struct nlattr
*tb
[TCA_ROOT_MAX
+ 1];
1168 struct nlattr
*count_attr
= NULL
;
1169 unsigned long jiffy_since
= 0;
1170 struct nlattr
*kind
= NULL
;
1171 struct nla_bitfield32 bf
;
1172 u32 msecs_since
= 0;
1175 ret
= nlmsg_parse(cb
->nlh
, sizeof(struct tcamsg
), tb
, TCA_ROOT_MAX
,
1180 kind
= find_dump_kind(tb
);
1182 pr_info("tc_dump_action: action bad kind\n");
1186 a_o
= tc_lookup_action(kind
);
1191 if (tb
[TCA_ROOT_FLAGS
]) {
1192 bf
= nla_get_bitfield32(tb
[TCA_ROOT_FLAGS
]);
1193 cb
->args
[2] = bf
.value
;
1196 if (tb
[TCA_ROOT_TIME_DELTA
]) {
1197 msecs_since
= nla_get_u32(tb
[TCA_ROOT_TIME_DELTA
]);
1200 nlh
= nlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
, cb
->nlh
->nlmsg_seq
,
1201 cb
->nlh
->nlmsg_type
, sizeof(*t
), 0);
1203 goto out_module_put
;
1206 jiffy_since
= jiffies
- msecs_to_jiffies(msecs_since
);
1208 t
= nlmsg_data(nlh
);
1209 t
->tca_family
= AF_UNSPEC
;
1212 cb
->args
[3] = jiffy_since
;
1213 count_attr
= nla_reserve(skb
, TCA_ROOT_COUNT
, sizeof(u32
));
1215 goto out_module_put
;
1217 nest
= nla_nest_start(skb
, TCA_ACT_TAB
);
1219 goto out_module_put
;
1221 ret
= a_o
->walk(net
, skb
, cb
, RTM_GETACTION
, a_o
);
1223 goto out_module_put
;
1226 nla_nest_end(skb
, nest
);
1228 act_count
= cb
->args
[1];
1229 memcpy(nla_data(count_attr
), &act_count
, sizeof(u32
));
1234 nlh
->nlmsg_len
= skb_tail_pointer(skb
) - b
;
1235 if (NETLINK_CB(cb
->skb
).portid
&& ret
)
1236 nlh
->nlmsg_flags
|= NLM_F_MULTI
;
1237 module_put(a_o
->owner
);
1241 module_put(a_o
->owner
);
struct tcf_action_net {
	struct rhashtable egdev_ht;
};

static unsigned int tcf_action_net_id;

struct tcf_action_egdev_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_priv;
};

struct tcf_action_egdev {
	struct rhash_head ht_node;
	const struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};
static const struct rhashtable_params tcf_action_egdev_ht_params = {
	.key_offset = offsetof(struct tcf_action_egdev, dev),
	.head_offset = offsetof(struct tcf_action_egdev, ht_node),
	.key_len = sizeof(const struct net_device *),
};
static struct tcf_action_egdev *
tcf_action_egdev_lookup(const struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
				      tcf_action_egdev_ht_params);
}
static struct tcf_action_egdev *
tcf_action_egdev_get(const struct net_device *dev)
{
	struct tcf_action_egdev *egdev;
	struct tcf_action_net *tan;

	egdev = tcf_action_egdev_lookup(dev);
	if (egdev)
		goto inc_ref;

	egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
	if (!egdev)
		return NULL;
	INIT_LIST_HEAD(&egdev->cb_list);
	egdev->dev = dev;
	tan = net_generic(dev_net(dev), tcf_action_net_id);
	rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);

inc_ref:
	egdev->refcnt++;
	return egdev;
}
static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
{
	struct tcf_action_net *tan;

	if (--egdev->refcnt)
		return;
	tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
	rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);
	kfree(egdev);
}
static struct tcf_action_egdev_cb *
tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
			   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list)
		if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
			return egdev_cb;
	return NULL;
}
static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
				    enum tc_setup_type type,
				    void *type_data, bool err_stop)
{
	struct tcf_action_egdev_cb *egdev_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
		err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}
static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
				   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(egdev_cb))
		return -EEXIST;
	egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
	if (!egdev_cb)
		return -ENOMEM;
	egdev_cb->cb = cb;
	egdev_cb->cb_priv = cb_priv;
	list_add(&egdev_cb->list, &egdev->cb_list);
	return 0;
}
static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
				    tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(!egdev_cb))
		return;
	list_del(&egdev_cb->list);
	kfree(egdev_cb);
}
static int __tc_setup_cb_egdev_register(const struct net_device *dev,
					tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
	int err;

	if (!egdev)
		return -ENOMEM;
	err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
	if (err)
		goto err_cb_add;
	return 0;

err_cb_add:
	tcf_action_egdev_put(egdev);
	return err;
}
int tc_setup_cb_egdev_register(const struct net_device *dev,
			       tc_setup_cb_t *cb, void *cb_priv)
{
	int err;

	rtnl_lock();
	err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);
static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
					   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (WARN_ON(!egdev))
		return;
	tcf_action_egdev_cb_del(egdev, cb, cb_priv);
	tcf_action_egdev_put(egdev);
}
void tc_setup_cb_egdev_unregister(const struct net_device *dev,
				  tc_setup_cb_t *cb, void *cb_priv)
{
	rtnl_lock();
	__tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);
int tc_setup_cb_egdev_call(const struct net_device *dev,
			   enum tc_setup_type type, void *type_data,
			   bool err_stop)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (!egdev)
		return 0;
	return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);
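
/* Example (illustrative sketch only): a driver interested in actions whose
 * egress device is one of its netdevs registers per netdev; the callback
 * name and priv pointer here are hypothetical:
 *
 *	err = tc_setup_cb_egdev_register(netdev, example_setup_tc_cb, priv);
 *
 * and mirrors it with tc_setup_cb_egdev_unregister() on teardown.
 * tc_setup_cb_egdev_call() then returns the number of callbacks that
 * succeeded, or the first error when err_stop is set.
 */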
static __net_init int tcf_action_net_init(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
}

static void __net_exit tcf_action_net_exit(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	rhashtable_destroy(&tan->egdev_ht);
}

static struct pernet_operations tcf_action_net_ops = {
	.init = tcf_action_net_init,
	.exit = tcf_action_net_exit,
	.id = &tcf_action_net_id,
	.size = sizeof(struct tcf_action_net),
};
static int __init tc_action_init(void)
{
	int err;

	err = register_pernet_subsys(&tcf_action_net_ops);
	if (err)
		return err;

	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);