/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>
static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
        u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

        if (!tp)
                return -EINVAL;
        a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
        if (!a->goto_chain)
                return -ENOMEM;
        return 0;
}
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
        tcf_chain_put_by_act(a->goto_chain);
}
static void tcf_action_goto_chain_exec(const struct tc_action *a,
                                       struct tcf_result *res)
{
        const struct tcf_chain *chain = a->goto_chain;

        res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
static void tcf_free_cookie_rcu(struct rcu_head *p)
{
        struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

        kfree(cookie->data);
        kfree(cookie);
}
static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
                                  struct tc_cookie *new_cookie)
{
        struct tc_cookie *old;

        old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
        if (old)
                call_rcu(&old->rcu, tcf_free_cookie_rcu);
}
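
/* A hedged reader-side sketch: the xchg() above publishes the new cookie,
 * so a concurrent reader is expected to pair it with RCU, e.g.:
 *
 *	rcu_read_lock();
 *	cookie = rcu_dereference(a->act_cookie);
 *	if (cookie)
 *		nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data);
 *	rcu_read_unlock();
 *
 * which is the pattern tcf_action_dump_1() below actually uses.
 */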
/* XXX: For standalone actions, we don't need a RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after a RCU grace period actions are already disconnected
 * from filters. Readers later cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
        free_percpu(p->cpu_bstats);
        free_percpu(p->cpu_qstats);

        tcf_set_action_cookie(&p->act_cookie, NULL);
        if (p->goto_chain)
                tcf_action_goto_chain_fini(p);

        kfree(p);
}
static void tcf_action_cleanup(struct tc_action *p)
{
        if (p->ops->cleanup)
                p->ops->cleanup(p);

        gen_kill_estimator(&p->tcfa_rate_est);
        free_tcf(p);
}
static int __tcf_action_put(struct tc_action *p, bool bind)
{
        struct tcf_idrinfo *idrinfo = p->idrinfo;

        if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
                if (bind)
                        atomic_dec(&p->tcfa_bindcnt);
                idr_remove(&idrinfo->action_idr, p->tcfa_index);
                spin_unlock(&idrinfo->lock);

                tcf_action_cleanup(p);
                return 1;
        }

        if (bind)
                atomic_dec(&p->tcfa_bindcnt);

        return 0;
}
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
        int ret = 0;

        /* Release with strict==1 and bind==0 is only called through act API
         * interface (classifiers always bind). The only case when an action
         * with a positive reference count and zero bind count can exist is
         * when it was also created with act API (unbinding the last classifier
         * will destroy the action if it was created by a classifier). So the
         * only case when the bind count can change after the initial check is
         * when an unbound action is destroyed by act API while a classifier
         * binds to an action with the same id concurrently. This results
         * either in creation of a new action (same behavior as before), or in
         * reuse of the existing action if the concurrent process increments
         * the reference count before the action is deleted. Both scenarios
         * are acceptable.
         */
        if (p) {
                if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
                        return -EPERM;

                if (__tcf_action_put(p, bind))
                        ret = ACT_P_DELETED;
        }

        return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
        struct tc_cookie *act_cookie;
        u32 cookie_len = 0;

        rcu_read_lock();
        act_cookie = rcu_dereference(act->act_cookie);

        if (act_cookie)
                cookie_len = nla_total_size(act_cookie->len);
        rcu_read_unlock();

        return  nla_total_size(0) /* action number nested */
                + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
                + cookie_len /* TCA_ACT_COOKIE */
                + nla_total_size(0) /* TCA_ACT_STATS nested */
                /* TCA_STATS_BASIC */
                + nla_total_size_64bit(sizeof(struct gnet_stats_basic))
                /* TCA_STATS_QUEUE */
                + nla_total_size_64bit(sizeof(struct gnet_stats_queue))
                + nla_total_size(0) /* TCA_OPTIONS nested */
                + nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}
static size_t tcf_action_full_attrs_size(size_t sz)
{
        return NLMSG_HDRLEN                     /* struct nlmsghdr */
                + sizeof(struct tcamsg)
                + nla_total_size(0)             /* TCA_ACT_TAB nested */
                + sz;
}
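
/* Worked size example (illustrative): nla_total_size(len) is
 * NLA_ALIGN(NLA_HDRLEN + len), i.e. a 4-byte attribute header plus the
 * payload, rounded up to a 4-byte boundary. A 6-byte cookie therefore
 * contributes NLA_ALIGN(4 + 6) = 12 bytes to the estimate above.
 */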
static size_t tcf_action_fill_size(const struct tc_action *act)
{
        size_t sz = tcf_action_shared_attrs_size(act);

        if (act->ops->get_fill_size)
                return act->ops->get_fill_size(act) + sz;
        return sz;
}
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
                           struct netlink_callback *cb)
{
        int err = 0, index = -1, s_i = 0, n_i = 0;
        u32 act_flags = cb->args[2];
        unsigned long jiffy_since = cb->args[3];
        struct nlattr *nest;
        struct idr *idr = &idrinfo->action_idr;
        struct tc_action *p;
        unsigned long id = 1;

        spin_lock(&idrinfo->lock);

        s_i = cb->args[0];

        idr_for_each_entry_ul(idr, p, id) {
                index++;
                if (index < s_i)
                        continue;

                if (jiffy_since &&
                    time_after(jiffy_since,
                               (unsigned long)p->tcfa_tm.lastuse))
                        continue;

                nest = nla_nest_start(skb, n_i);
                if (!nest) {
                        index--;
                        goto nla_put_failure;
                }
                err = tcf_action_dump_1(skb, p, 0, 0);
                if (err < 0) {
                        index--;
                        nlmsg_trim(skb, nest);
                        goto done;
                }
                nla_nest_end(skb, nest);
                n_i++;
                if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
                    n_i >= TCA_ACT_MAX_PRIO)
                        goto done;
        }
done:
        if (index >= 0)
                cb->args[0] = index + 1;

        spin_unlock(&idrinfo->lock);
        if (n_i) {
                if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
                        cb->args[1] = n_i;
        }
        return n_i;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        goto done;
}
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
                          const struct tc_action_ops *ops)
{
        struct nlattr *nest;
        int n_i = 0;
        int ret = -EINVAL;
        struct idr *idr = &idrinfo->action_idr;
        struct tc_action *p;
        unsigned long id = 1;

        nest = nla_nest_start(skb, 0);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put_string(skb, TCA_KIND, ops->kind))
                goto nla_put_failure;

        idr_for_each_entry_ul(idr, p, id) {
                ret = __tcf_idr_release(p, false, true);
                if (ret == ACT_P_DELETED) {
                        module_put(ops->owner);
                        n_i++;
                } else if (ret < 0) {
                        goto nla_put_failure;
                }
        }
        if (nla_put_u32(skb, TCA_FCNT, n_i))
                goto nla_put_failure;
        nla_nest_end(skb, nest);

        return n_i;
nla_put_failure:
        nla_nest_cancel(skb, nest);
        return ret;
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
                       struct netlink_callback *cb, int type,
                       const struct tc_action_ops *ops,
                       struct netlink_ext_ack *extack)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;

        if (type == RTM_DELACTION) {
                return tcf_del_walker(idrinfo, skb, ops);
        } else if (type == RTM_GETACTION) {
                return tcf_dump_walker(idrinfo, skb, cb);
        } else {
                WARN(1, "tcf_generic_walker: unknown command %d\n", type);
                NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
                return -EINVAL;
        }
}
EXPORT_SYMBOL(tcf_generic_walker);
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;

        spin_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
        if (IS_ERR(p))
                p = NULL;
        else if (p)
                refcount_inc(&p->tcfa_refcnt);
        spin_unlock(&idrinfo->lock);

        if (p) {
                *a = p;
                return true;
        }
        return false;
}
EXPORT_SYMBOL(tcf_idr_search);
static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
        struct tc_action *p;
        int ret = 0;

        spin_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
        if (!p) {
                spin_unlock(&idrinfo->lock);
                return -ENOENT;
        }

        if (!atomic_read(&p->tcfa_bindcnt)) {
                if (refcount_dec_and_test(&p->tcfa_refcnt)) {
                        struct module *owner = p->ops->owner;

                        WARN_ON(p != idr_remove(&idrinfo->action_idr,
                                                p->tcfa_index));
                        spin_unlock(&idrinfo->lock);

                        tcf_action_cleanup(p);
                        module_put(owner);
                        return 0;
                }
                ret = 0;
        } else {
                ret = -EPERM;
        }

        spin_unlock(&idrinfo->lock);
        return ret;
}
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
                   int bind, bool cpustats)
{
        struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        int err = -ENOMEM;

        if (unlikely(!p))
                return -ENOMEM;
        refcount_set(&p->tcfa_refcnt, 1);
        if (bind)
                atomic_set(&p->tcfa_bindcnt, 1);

        if (cpustats) {
                p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
                if (!p->cpu_bstats)
                        goto err1;
                p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
                if (!p->cpu_qstats)
                        goto err2;
        }
        spin_lock_init(&p->tcfa_lock);
        p->tcfa_index = index;
        p->tcfa_tm.install = jiffies;
        p->tcfa_tm.lastuse = jiffies;
        p->tcfa_tm.firstuse = 0;
        if (est) {
                err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
                                        &p->tcfa_rate_est,
                                        &p->tcfa_lock, NULL, est);
                if (err)
                        goto err3;
        }

        p->idrinfo = idrinfo;
        p->ops = ops;
        *a = p;
        return 0;
err3:
        free_percpu(p->cpu_qstats);
err2:
        free_percpu(p->cpu_bstats);
err1:
        kfree(p);
        return err;
}
EXPORT_SYMBOL(tcf_idr_create);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;

        spin_lock(&idrinfo->lock);
        /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
        WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
        spin_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;

        spin_lock(&idrinfo->lock);
        /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
        WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
        spin_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
/* Check if an action with the specified index exists. If the action is found,
 * increment its reference and bind counters and return 1. Otherwise insert a
 * temporary error pointer (to prevent concurrent users from inserting actions
 * with the same index) and return 0.
 */
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
                        struct tc_action **a, int bind)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;
        int ret;

again:
        spin_lock(&idrinfo->lock);
        if (*index) {
                p = idr_find(&idrinfo->action_idr, *index);
                if (IS_ERR(p)) {
                        /* This means that another process allocated
                         * index but did not assign the pointer yet.
                         */
                        spin_unlock(&idrinfo->lock);
                        goto again;
                }

                if (p) {
                        refcount_inc(&p->tcfa_refcnt);
                        if (bind)
                                atomic_inc(&p->tcfa_bindcnt);
                        *a = p;
                        ret = 1;
                } else {
                        *a = NULL;
                        ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
                                            *index, GFP_ATOMIC);
                        if (!ret)
                                idr_replace(&idrinfo->action_idr,
                                            ERR_PTR(-EBUSY), *index);
                }
        } else {
                *index = 1;
                *a = NULL;
                ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
                                    UINT_MAX, GFP_ATOMIC);
                if (!ret)
                        idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
                                    *index);
        }
        spin_unlock(&idrinfo->lock);
        return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
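
/* Typical usage from an action's ->init() callback (a sketch based on how
 * existing actions such as act_gact drive this API; details vary per action):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {
 *		// index was free and is now reserved with ERR_PTR(-EBUSY)
 *		err = tcf_idr_create(tn, index, est, a, ops, bind, false);
 *		if (err) {
 *			tcf_idr_cleanup(tn, index);	// drop the marker
 *			return err;
 *		}
 *		// ... set up action-private state, then publish:
 *		tcf_idr_insert(tn, *a);
 *	} else if (err > 0) {
 *		// existing action found; reference (and bind) already taken
 *	}
 */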
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
                         struct tcf_idrinfo *idrinfo)
{
        struct idr *idr = &idrinfo->action_idr;
        struct tc_action *p;
        int ret;
        unsigned long id = 1;

        idr_for_each_entry_ul(idr, p, id) {
                ret = __tcf_idr_release(p, false, true);
                if (ret == ACT_P_DELETED)
                        module_put(ops->owner);
                else if (ret < 0)
                        return;
        }
        idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
int tcf_register_action(struct tc_action_ops *act,
                        struct pernet_operations *ops)
{
        struct tc_action_ops *a;
        int ret;

        if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
                return -EINVAL;

        /* We have to register pernet ops before making the action ops visible,
         * otherwise tcf_action_init_1() could get a partially initialized
         * net structure.
         */
        ret = register_pernet_subsys(ops);
        if (ret)
                return ret;

        write_lock(&act_mod_lock);
        list_for_each_entry(a, &act_base, head) {
                if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
                        write_unlock(&act_mod_lock);
                        unregister_pernet_subsys(ops);
                        return -EEXIST;
                }
        }
        list_add_tail(&act->head, &act_base);
        write_unlock(&act_mod_lock);

        return 0;
}
EXPORT_SYMBOL(tcf_register_action);
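
/* Registration sketch (an assumption for illustration, modeled on existing
 * action modules; "foo", TCA_ACT_FOO and the tcf_foo_* callbacks are
 * hypothetical names):
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.type	= TCA_ACT_FOO,
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.walk	= tcf_foo_walker,
 *		.lookup	= tcf_foo_search,
 *		.size	= sizeof(struct tcf_foo),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 * The mandatory-callback check above rejects ops that leave any of
 * act/dump/init/walk/lookup unset.
 */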
int tcf_unregister_action(struct tc_action_ops *act,
                          struct pernet_operations *ops)
{
        struct tc_action_ops *a;
        int err = -ENOENT;

        write_lock(&act_mod_lock);
        list_for_each_entry(a, &act_base, head) {
                if (a == act) {
                        list_del(&act->head);
                        err = 0;
                        break;
                }
        }
        write_unlock(&act_mod_lock);
        if (!err)
                unregister_pernet_subsys(ops);
        return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
        struct tc_action_ops *a, *res = NULL;

        if (kind) {
                read_lock(&act_mod_lock);
                list_for_each_entry(a, &act_base, head) {
                        if (strcmp(kind, a->kind) == 0) {
                                if (try_module_get(a->owner))
                                        res = a;
                                break;
                        }
                }
                read_unlock(&act_mod_lock);
        }
        return res;
}
/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
        struct tc_action_ops *a, *res = NULL;

        if (kind) {
                read_lock(&act_mod_lock);
                list_for_each_entry(a, &act_base, head) {
                        if (nla_strcmp(kind, a->kind) == 0) {
                                if (try_module_get(a->owner))
                                        res = a;
                                break;
                        }
                }
                read_unlock(&act_mod_lock);
        }
        return res;
}
/* TCA_ACT_MAX_PRIO is 32; the jump count in a TC_ACT_JUMP verdict can
 * therefore reference up to 32 actions.
 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
                    int nr_actions, struct tcf_result *res)
{
        u32 jmp_prgcnt = 0;
        u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
        int i;
        int ret = TC_ACT_OK;

        if (skb_skip_tc_classify(skb))
                return TC_ACT_OK;

restart_act_graph:
        for (i = 0; i < nr_actions; i++) {
                const struct tc_action *a = actions[i];

                if (jmp_prgcnt > 0) {
                        jmp_prgcnt -= 1;
                        continue;
                }
repeat:
                ret = a->ops->act(skb, a, res);
                if (ret == TC_ACT_REPEAT)
                        goto repeat;    /* we need a ttl - JHS */

                if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
                        jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
                        if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
                                /* faulty opcode, stop pipeline */
                                return TC_ACT_OK;
                        } else {
                                jmp_ttl -= 1;
                                if (jmp_ttl > 0)
                                        goto restart_act_graph;
                                else /* faulty graph, stop pipeline */
                                        return TC_ACT_OK;
                        }
                } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
                        tcf_action_goto_chain_exec(a, res);
                }

                if (ret != TC_ACT_PIPE)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
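
/* Verdict encoding example (illustrative): TC_ACT_JUMP is an extended
 * opcode, so a verdict of (TC_ACT_JUMP | 2) makes the loop above skip the
 * next two actions via jmp_prgcnt (bounded by jmp_ttl), while a
 * TC_ACT_GOTO_CHAIN verdict carries the target chain index in its
 * TC_ACT_EXT_VAL_MASK bits, as decoded by tcf_action_goto_chain_init().
 */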
int tcf_action_destroy(struct tc_action *actions[], int bind)
{
        const struct tc_action_ops *ops;
        struct tc_action *a;
        int ret = 0, i;

        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                a = actions[i];
                actions[i] = NULL;
                ops = a->ops;
                ret = __tcf_idr_release(a, bind, true);
                if (ret == ACT_P_DELETED)
                        module_put(ops->owner);
                else if (ret < 0)
                        return ret;
        }
        return ret;
}
static int tcf_action_destroy_1(struct tc_action *a, int bind)
{
        struct tc_action *actions[] = { a, NULL };

        return tcf_action_destroy(actions, bind);
}
static int tcf_action_put(struct tc_action *p)
{
        return __tcf_action_put(p, false);
}
/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
        int i;

        for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
                struct tc_action *a = actions[i];
                const struct tc_action_ops *ops;

                if (!a)
                        continue;
                ops = a->ops;
                if (tcf_action_put(a))
                        module_put(ops->owner);
        }
}
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
        return a->ops->dump(skb, a, bind, ref);
}
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
        int err = -EINVAL;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        struct tc_cookie *cookie;

        if (nla_put_string(skb, TCA_KIND, a->ops->kind))
                goto nla_put_failure;
        if (tcf_action_copy_stats(skb, a, 0))
                goto nla_put_failure;

        rcu_read_lock();
        cookie = rcu_dereference(a->act_cookie);
        if (cookie) {
                if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
                        rcu_read_unlock();
                        goto nla_put_failure;
                }
        }
        rcu_read_unlock();

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        err = tcf_action_dump_old(skb, a, bind, ref);
        if (err > 0) {
                nla_nest_end(skb, nest);
                return err;
        }

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
                    int bind, int ref)
{
        struct tc_action *a;
        int err = -EINVAL, i;
        struct nlattr *nest;

        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                a = actions[i];
                nest = nla_nest_start(skb, a->order);
                if (nest == NULL)
                        goto nla_put_failure;
                err = tcf_action_dump_1(skb, a, bind, ref);
                if (err < 0)
                        goto errout;
                nla_nest_end(skb, nest);
        }

        return 0;

nla_put_failure:
        err = -EINVAL;
errout:
        nla_nest_cancel(skb, nest);
        return err;
}
static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
        struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;

        c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
        if (!c->data) {
                kfree(c);
                return NULL;
        }
        c->len = nla_len(tb[TCA_ACT_COOKIE]);

        return c;
}
static bool tcf_action_valid(int action)
{
        int opcode = TC_ACT_EXT_OPCODE(action);

        if (!opcode)
                return action <= TC_ACT_VALUE_MAX;
        return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
}
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
                                    bool rtnl_held,
                                    struct netlink_ext_ack *extack)
{
        struct tc_action *a;
        struct tc_action_ops *a_o;
        struct tc_cookie *cookie = NULL;
        char act_name[IFNAMSIZ];
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
        int err;

        if (name == NULL) {
                err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
                if (err < 0)
                        goto err_out;
                err = -EINVAL;
                kind = tb[TCA_ACT_KIND];
                if (!kind) {
                        NL_SET_ERR_MSG(extack, "TC action kind must be specified");
                        goto err_out;
                }
                if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
                        NL_SET_ERR_MSG(extack, "TC action name too long");
                        goto err_out;
                }
                if (tb[TCA_ACT_COOKIE]) {
                        int cklen = nla_len(tb[TCA_ACT_COOKIE]);

                        if (cklen > TC_COOKIE_MAX_SIZE) {
                                NL_SET_ERR_MSG(extack, "TC cookie size above the maximum");
                                goto err_out;
                        }

                        cookie = nla_memdup_cookie(tb);
                        if (!cookie) {
                                NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
                                err = -ENOMEM;
                                goto err_out;
                        }
                }
        } else {
                if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
                        NL_SET_ERR_MSG(extack, "TC action name too long");
                        err = -EINVAL;
                        goto err_out;
                }
        }

        a_o = tc_lookup_action_n(act_name);
        if (a_o == NULL) {
#ifdef CONFIG_MODULES
                if (rtnl_held)
                        rtnl_unlock();
                request_module("act_%s", act_name);
                if (rtnl_held)
                        rtnl_lock();

                a_o = tc_lookup_action_n(act_name);

                /* We dropped the RTNL semaphore in order to
                 * perform the module load. So, even if we
                 * succeeded in loading the module we have to
                 * tell the caller to replay the request. We
                 * indicate this using -EAGAIN.
                 */
                if (a_o != NULL) {
                        err = -EAGAIN;
                        goto err_mod;
                }
#endif
                NL_SET_ERR_MSG(extack, "Failed to load TC action module");
                err = -ENOENT;
                goto err_out;
        }

        /* backward compatibility for policer */
        if (name == NULL)
                err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
                                rtnl_held, extack);
        else
                err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
                                extack);
        if (err < 0)
                goto err_mod;

        if (!name && tb[TCA_ACT_COOKIE])
                tcf_set_action_cookie(&a->act_cookie, cookie);

        /* module count goes up only when brand new policy is created
         * if it exists and is only bound to in a_o->init() then
         * ACT_P_CREATED is not returned (a zero is).
         */
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);

        if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
                err = tcf_action_goto_chain_init(a, tp);
                if (err) {
                        tcf_action_destroy_1(a, bind);
                        NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
                        return ERR_PTR(err);
                }
        }

        if (!tcf_action_valid(a->tcfa_action)) {
                tcf_action_destroy_1(a, bind);
                NL_SET_ERR_MSG(extack, "Invalid control action value");
                return ERR_PTR(-EINVAL);
        }

        return a;

err_mod:
        module_put(a_o->owner);
err_out:
        if (cookie) {
                kfree(cookie->data);
                kfree(cookie);
        }
        return ERR_PTR(err);
}
/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
                    struct tc_action *actions[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
        size_t sz = 0;
        int err;
        int i;

        err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
        if (err < 0)
                return err;

        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
                                        rtnl_held, extack);
                if (IS_ERR(act)) {
                        err = PTR_ERR(act);
                        goto err;
                }
                act->order = i;
                sz += tcf_action_fill_size(act);
                /* Start from index 0 */
                actions[i - 1] = act;
        }

        *attr_size = tcf_action_full_attrs_size(sz);
        return i - 1;

err:
        tcf_action_destroy(actions, bind);
        return err;
}
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
                          int compat_mode)
{
        int err = 0;
        struct gnet_dump d;

        if (p == NULL)
                goto errout;

        /* compat_mode being true specifies a call that is supposed
         * to add additional backward compatibility statistic TLVs.
         */
        if (compat_mode) {
                if (p->type == TCA_OLD_COMPAT)
                        err = gnet_stats_start_copy_compat(skb, 0,
                                                           TCA_STATS,
                                                           TCA_XSTATS,
                                                           &p->tcfa_lock, &d,
                                                           TCA_PAD);
                else
                        return 0;
        } else {
                err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
                                            &p->tcfa_lock, &d, TCA_ACT_PAD);
        }

        if (err < 0)
                goto errout;

        if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
            gnet_stats_copy_queue(&d, p->cpu_qstats,
                                  &p->tcfa_qstats,
                                  p->tcfa_qstats.qlen) < 0)
                goto errout;

        if (gnet_stats_finish_copy(&d) < 0)
                goto errout;

        return 0;

errout:
        return -1;
}
static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
                        u32 portid, u32 seq, u16 flags, int event, int bind,
                        int ref)
{
        struct tcamsg *t;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        t = nlmsg_data(nlh);
        t->tca_family = AF_UNSPEC;
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;

        nest = nla_nest_start(skb, TCA_ACT_TAB);
        if (!nest)
                goto out_nlmsg_trim;

        if (tcf_action_dump(skb, actions, bind, ref) < 0)
                goto out_nlmsg_trim;

        nla_nest_end(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;

        return skb->len;

out_nlmsg_trim:
        nlmsg_trim(skb, b);
        return -1;
}
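
/* Resulting message layout (a sketch of what tca_get_fill() emits):
 *
 *	struct nlmsghdr
 *	struct tcamsg
 *	TCA_ACT_TAB (nested)
 *		one nested attribute per action, keyed by action order:
 *			TCA_ACT_KIND
 *			TCA_ACT_COOKIE (optional)
 *			TCA_ACT_STATS (nested)
 *			TCA_OPTIONS (nested, action-specific)
 */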
static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
               struct tc_action *actions[], int event,
               struct netlink_ext_ack *extack)
{
        struct sk_buff *skb;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
                         0, 1) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
                kfree_skb(skb);
                return -EINVAL;
        }

        return rtnl_unicast(skb, net, portid);
}
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
                                          struct nlmsghdr *n, u32 portid,
                                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_ACT_MAX + 1];
        const struct tc_action_ops *ops;
        struct tc_action *a;
        int index;
        int err;

        err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
        if (err < 0)
                goto err_out;

        err = -EINVAL;
        if (tb[TCA_ACT_INDEX] == NULL ||
            nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
                NL_SET_ERR_MSG(extack, "Invalid TC action index value");
                goto err_out;
        }
        index = nla_get_u32(tb[TCA_ACT_INDEX]);

        err = -EINVAL;
        ops = tc_lookup_action(tb[TCA_ACT_KIND]);
        if (!ops) { /* could happen in batch of actions */
                NL_SET_ERR_MSG(extack, "Specified TC action not found");
                goto err_out;
        }
        err = -ENOENT;
        if (ops->lookup(net, &a, index, extack) == 0)
                goto err_mod;

        module_put(ops->owner);
        return a;

err_mod:
        module_put(ops->owner);
err_out:
        return ERR_PTR(err);
}
static int tca_action_flush(struct net *net, struct nlattr *nla,
                            struct nlmsghdr *n, u32 portid,
                            struct netlink_ext_ack *extack)
{
        struct sk_buff *skb;
        unsigned char *b;
        struct nlmsghdr *nlh;
        struct tcamsg *t;
        struct netlink_callback dcb;
        struct nlattr *nest;
        struct nlattr *tb[TCA_ACT_MAX + 1];
        const struct tc_action_ops *ops;
        struct nlattr *kind;
        int err = -ENOMEM;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return err;

        b = skb_tail_pointer(skb);

        err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
        if (err < 0)
                goto err_out;

        err = -EINVAL;
        kind = tb[TCA_ACT_KIND];
        ops = tc_lookup_action(kind);
        if (!ops) { /* could happen when trying to flush an unknown action */
                NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
                goto err_out;
        }

        nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
                        sizeof(*t), 0);
        if (!nlh) {
                NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
                goto out_module_put;
        }
        t = nlmsg_data(nlh);
        t->tca_family = AF_UNSPEC;
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;

        nest = nla_nest_start(skb, TCA_ACT_TAB);
        if (!nest) {
                NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
                goto out_module_put;
        }

        err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
        if (err <= 0) {
                nla_nest_cancel(skb, nest);
                goto out_module_put;
        }

        nla_nest_end(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(ops->owner);
        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                return 0;
        if (err < 0)
                NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

        return err;

out_module_put:
        module_put(ops->owner);
err_out:
        kfree_skb(skb);
        return err;
}
static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
        int i;

        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                struct tc_action *a = actions[i];
                const struct tc_action_ops *ops = a->ops;
                /* Actions can be deleted concurrently so we must save their
                 * type and id to search again after reference is released.
                 */
                struct tcf_idrinfo *idrinfo = a->idrinfo;
                u32 act_index = a->tcfa_index;

                actions[i] = NULL;
                if (tcf_action_put(a)) {
                        /* last reference, action was deleted concurrently */
                        module_put(ops->owner);
                } else {
                        int ret;

                        /* now do the delete */
                        ret = tcf_idr_delete_index(idrinfo, act_index);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
               u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
        int ret;
        struct sk_buff *skb;

        skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
                        GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
                         0, 2) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
                kfree_skb(skb);
                return -EINVAL;
        }

        /* now do the delete */
        ret = tcf_action_delete(net, actions);
        if (ret < 0) {
                NL_SET_ERR_MSG(extack, "Failed to delete TC action");
                kfree_skb(skb);
                return ret;
        }

        ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (ret > 0)
                return 0;
        return ret;
}
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
              u32 portid, int event, struct netlink_ext_ack *extack)
{
        int i, ret;
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
        size_t attr_size = 0;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

        ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
        if (ret < 0)
                return ret;

        if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
                if (tb[1])
                        return tca_action_flush(net, tb[1], n, portid, extack);

                NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
                return -EINVAL;
        }

        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_get_1(net, tb[i], n, portid, extack);
                if (IS_ERR(act)) {
                        ret = PTR_ERR(act);
                        goto err;
                }
                attr_size += tcf_action_fill_size(act);
                actions[i - 1] = act;
        }

        attr_size = tcf_action_full_attrs_size(attr_size);

        if (event == RTM_GETACTION)
                ret = tcf_get_notify(net, portid, n, actions, event, extack);
        else { /* delete */
                ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
                if (ret)
                        goto err;
                return 0;
        }
err:
        tcf_action_put_many(actions);
        return ret;
}
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
               u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
        struct sk_buff *skb;
        int err = 0;

        skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
                        GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
                         RTM_NEWACTION, 0, 0) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
                kfree_skb(skb);
                return -EINVAL;
        }

        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
}
static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct nlmsghdr *n, u32 portid, int ovr,
                          struct netlink_ext_ack *extack)
{
        size_t attr_size = 0;
        int ret = 0;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

        ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
                              &attr_size, true, extack);
        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
        if (ovr)
                tcf_action_put_many(actions);

        return ret;
}
static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
        [TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
                             .validation_data = &tcaa_root_flags_allowed },
        [TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
                         struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_ROOT_MAX + 1];
        u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = 0, ovr = 0;

        if ((n->nlmsg_type != RTM_GETACTION) &&
            !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
                          extack);
        if (ret < 0)
                return ret;

        if (tca[TCA_ACT_TAB] == NULL) {
                NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
                return -EINVAL;
        }

        /* n->nlmsg_flags & NLM_F_CREATE */
        switch (n->nlmsg_type) {
        case RTM_NEWACTION:
                /* We are going to assume that all other flags imply
                 * create-only if it doesn't already exist. Note that
                 * CREATE | EXCL implies that, but since we want to avoid
                 * ambiguity (e.g. when flags is zero) we just set this.
                 */
                if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
                ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
                                     extack);
                break;
        case RTM_DELACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
                                    portid, RTM_DELACTION, extack);
                break;
        case RTM_GETACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
                                    portid, RTM_GETACTION, extack);
                break;
        default:
                BUG();
        }

        return ret;
}
static struct nlattr *find_dump_kind(struct nlattr **nla)
{
        struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct nlattr *kind;

        tb1 = nla[TCA_ACT_TAB];
        if (tb1 == NULL)
                return NULL;

        if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
                      NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
                return NULL;

        if (tb[1] == NULL)
                return NULL;
        if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
                return NULL;
        kind = tb2[TCA_ACT_KIND];

        return kind;
}
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        struct tc_action_ops *a_o;
        int ret = 0;
        struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
        struct nlattr *tb[TCA_ROOT_MAX + 1];
        struct nlattr *count_attr = NULL;
        unsigned long jiffy_since = 0;
        struct nlattr *kind = NULL;
        struct nla_bitfield32 bf;
        u32 msecs_since = 0;
        u32 act_count = 0;

        ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
                          tcaa_policy, NULL);
        if (ret < 0)
                return ret;

        kind = find_dump_kind(tb);
        if (kind == NULL) {
                pr_info("tc_dump_action: action bad kind\n");
                return 0;
        }

        a_o = tc_lookup_action(kind);
        if (a_o == NULL)
                return 0;

        cb->args[2] = 0;
        if (tb[TCA_ROOT_FLAGS]) {
                bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
                cb->args[2] = bf.value;
        }

        if (tb[TCA_ROOT_TIME_DELTA]) {
                msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
        }

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        cb->nlh->nlmsg_type, sizeof(*t), 0);
        if (!nlh)
                goto out_module_put;

        if (msecs_since)
                jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

        t = nlmsg_data(nlh);
        t->tca_family = AF_UNSPEC;
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;
        cb->args[3] = jiffy_since;
        count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
        if (!count_attr)
                goto out_module_put;

        nest = nla_nest_start(skb, TCA_ACT_TAB);
        if (nest == NULL)
                goto out_module_put;

        ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
        if (ret < 0)
                goto out_module_put;

        if (ret > 0) {
                nla_nest_end(skb, nest);
                ret = skb->len;
                act_count = cb->args[1];
                memcpy(nla_data(count_attr), &act_count, sizeof(u32));
                cb->args[1] = 0;
        } else {
                nlmsg_trim(skb, b);
        }

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        if (NETLINK_CB(cb->skb).portid && ret)
                nlh->nlmsg_flags |= NLM_F_MULTI;
        module_put(a_o->owner);
        return skb->len;

out_module_put:
        module_put(a_o->owner);
        nlmsg_trim(skb, b);
        return skb->len;
}
struct tcf_action_net {
        struct rhashtable egdev_ht;
};

static unsigned int tcf_action_net_id;

struct tcf_action_egdev_cb {
        struct list_head list;
        tc_setup_cb_t *cb;
        void *cb_priv;
};

struct tcf_action_egdev {
        struct rhash_head ht_node;
        const struct net_device *dev;
        unsigned int refcnt;
        struct list_head cb_list;
};
= {
1517 .key_offset
= offsetof(struct tcf_action_egdev
, dev
),
1518 .head_offset
= offsetof(struct tcf_action_egdev
, ht_node
),
1519 .key_len
= sizeof(const struct net_device
*),
1522 static struct tcf_action_egdev
*
1523 tcf_action_egdev_lookup(const struct net_device
*dev
)
1525 struct net
*net
= dev_net(dev
);
1526 struct tcf_action_net
*tan
= net_generic(net
, tcf_action_net_id
);
1528 return rhashtable_lookup_fast(&tan
->egdev_ht
, &dev
,
1529 tcf_action_egdev_ht_params
);
static struct tcf_action_egdev *
tcf_action_egdev_get(const struct net_device *dev)
{
        struct tcf_action_egdev *egdev;
        struct tcf_action_net *tan;

        egdev = tcf_action_egdev_lookup(dev);
        if (egdev)
                goto inc_ref;

        egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
        if (!egdev)
                return NULL;
        INIT_LIST_HEAD(&egdev->cb_list);
        egdev->dev = dev;
        tan = net_generic(dev_net(dev), tcf_action_net_id);
        rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
                               tcf_action_egdev_ht_params);

inc_ref:
        egdev->refcnt++;
        return egdev;
}
static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
{
        struct tcf_action_net *tan;

        if (--egdev->refcnt)
                return;
        tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
        rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
                               tcf_action_egdev_ht_params);
        kfree(egdev);
}
static struct tcf_action_egdev_cb *
tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
                           tc_setup_cb_t *cb, void *cb_priv)
{
        struct tcf_action_egdev_cb *egdev_cb;

        list_for_each_entry(egdev_cb, &egdev->cb_list, list)
                if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
                        return egdev_cb;
        return NULL;
}
static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
                                    enum tc_setup_type type,
                                    void *type_data, bool err_stop)
{
        struct tcf_action_egdev_cb *egdev_cb;
        int ok_count = 0;
        int err;

        list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
                err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
                if (err) {
                        if (err_stop)
                                return err;
                } else {
                        ok_count++;
                }
        }
        return ok_count;
}
static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
                                   tc_setup_cb_t *cb, void *cb_priv)
{
        struct tcf_action_egdev_cb *egdev_cb;

        egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
        if (WARN_ON(egdev_cb))
                return -EEXIST;
        egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
        if (!egdev_cb)
                return -ENOMEM;
        egdev_cb->cb = cb;
        egdev_cb->cb_priv = cb_priv;
        list_add(&egdev_cb->list, &egdev->cb_list);
        return 0;
}
static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
                                    tc_setup_cb_t *cb, void *cb_priv)
{
        struct tcf_action_egdev_cb *egdev_cb;

        egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
        if (WARN_ON(!egdev_cb))
                return;
        list_del(&egdev_cb->list);
        kfree(egdev_cb);
}
static int __tc_setup_cb_egdev_register(const struct net_device *dev,
                                        tc_setup_cb_t *cb, void *cb_priv)
{
        struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
        int err;

        if (!egdev)
                return -ENOMEM;
        err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
        if (err)
                goto err_cb_add;
        return 0;

err_cb_add:
        tcf_action_egdev_put(egdev);
        return err;
}
int tc_setup_cb_egdev_register(const struct net_device *dev,
                               tc_setup_cb_t *cb, void *cb_priv)
{
        int err;

        rtnl_lock();
        err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);
static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
                                           tc_setup_cb_t *cb, void *cb_priv)
{
        struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

        if (WARN_ON(!egdev))
                return;
        tcf_action_egdev_cb_del(egdev, cb, cb_priv);
        tcf_action_egdev_put(egdev);
}

void tc_setup_cb_egdev_unregister(const struct net_device *dev,
                                  tc_setup_cb_t *cb, void *cb_priv)
{
        rtnl_lock();
        __tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);
int tc_setup_cb_egdev_call(const struct net_device *dev,
                           enum tc_setup_type type, void *type_data,
                           bool err_stop)
{
        struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

        if (!egdev)
                return 0;
        return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);
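
/* Usage sketch (an assumption for illustration): a driver that wants to see
 * rules redirecting traffic to one of its egress devices would register a
 * callback per device; foo_setup_tc_cb and priv are hypothetical names:
 *
 *	err = tc_setup_cb_egdev_register(netdev, foo_setup_tc_cb, priv);
 *	...
 *	tc_setup_cb_egdev_unregister(netdev, foo_setup_tc_cb, priv);
 *
 * tc_setup_cb_egdev_call() then fans a setup request out to every callback
 * registered for that device, stopping early on error when err_stop is set.
 */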
static __net_init int tcf_action_net_init(struct net *net)
{
        struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

        return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
}

static void __net_exit tcf_action_net_exit(struct net *net)
{
        struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

        rhashtable_destroy(&tan->egdev_ht);
}

static struct pernet_operations tcf_action_net_ops = {
        .init = tcf_action_net_init,
        .exit = tcf_action_net_exit,
        .id = &tcf_action_net_id,
        .size = sizeof(struct tcf_action_net),
};
static int __init tc_action_init(void)
{
        int err;

        err = register_pernet_subsys(&tcf_action_net_ops);
        if (err)
                return err;

        rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
                      0);

        return 0;
}

subsys_initcall(tc_action_init);