// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie)));
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);
struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
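/* Illustrative usage sketch (an assumption, modeled on existing actions
 * such as act_gact): an action's ->init() validates the control opcode,
 * resolves the goto_chain, then commits both with tcf_action_set_ctrlact()
 * and releases the chain it displaced:
 *
 *	struct tcf_chain *goto_ch = NULL;
 *
 *	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
 *	if (err < 0)
 *		goto release_idr;
 *	...
 *	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 *	if (goto_ch)
 *		tcf_chain_put_by_act(goto_ch);
 */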
/* XXX: For standalone actions, we don't need a RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after a RCU grace period actions are already disconnected
 * from filters. Readers later cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->user_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void offload_action_hw_count_set(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = hw_count;
}

static void offload_action_hw_count_inc(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count += hw_count;
}

static void offload_action_hw_count_dec(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = act->in_hw_count > hw_count ?
			   act->in_hw_count - hw_count : 0;
}

static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
{
	if (is_tcf_pedit(act))
		return tcf_pedit_nkeys(act);
	else
		return 1;
}

static bool tc_act_skip_hw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
}

static bool tc_act_skip_sw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static bool tc_act_flags_valid(u32 flags)
{
	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;

	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
}
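/* Worked example (illustrative, derived from the function above): the XOR
 * is zero, i.e. the flags are rejected, only when both bits are set:
 *
 *	tc_act_flags_valid(0)				-> true
 *	tc_act_flags_valid(TCA_ACT_FLAGS_SKIP_HW)	-> true
 *	tc_act_flags_valid(TCA_ACT_FLAGS_SKIP_SW)	-> true
 *	tc_act_flags_valid(TCA_ACT_FLAGS_SKIP_HW |
 *			   TCA_ACT_FLAGS_SKIP_SW)	-> false
 */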
static int offload_action_init(struct flow_offload_action *fl_action,
			       struct tc_action *act,
			       enum offload_act_command cmd,
			       struct netlink_ext_ack *extack)
{
	int err;

	fl_action->extack = extack;
	fl_action->command = cmd;
	fl_action->index = act->tcfa_index;
	fl_action->cookie = (unsigned long)act;

	if (act->ops->offload_act_setup) {
		spin_lock_bh(&act->tcfa_lock);
		err = act->ops->offload_act_setup(act, fl_action, NULL,
						  false, extack);
		spin_unlock_bh(&act->tcfa_lock);
		return err;
	}

	return -EOPNOTSUPP;
}

static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
				     u32 *hw_count)
{
	int err;

	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
					  fl_act, NULL, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = err;

	return 0;
}

static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
					u32 *hw_count,
					flow_indr_block_bind_cb_t *cb,
					void *cb_priv)
{
	int err;

	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = 1;

	return 0;
}

static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
				  u32 *hw_count,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv)
{
	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
						 cb, cb_priv) :
		    tcf_action_offload_cmd_ex(fl_act, hw_count);
}

static int tcf_action_offload_add_ex(struct tc_action *action,
				     struct netlink_ext_ack *extack,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	struct flow_offload_action *fl_action;
	u32 in_hw_count = 0;
	int num, err = 0;

	if (tc_act_skip_hw(action->tcfa_flags))
		return 0;

	num = tcf_offload_act_num_actions_single(action);
	fl_action = offload_action_alloc(num);
	if (!fl_action)
		return -ENOMEM;

	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
	if (err)
		goto fl_err;

	err = tc_setup_action(&fl_action->action, actions, 0, extack);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to setup tc actions for offload");
		goto fl_err;
	}

	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
	if (!err)
		cb ? offload_action_hw_count_inc(action, in_hw_count) :
		     offload_action_hw_count_set(action, in_hw_count);

	if (skip_sw && !tc_act_in_hw(action))
		err = -EINVAL;

	tc_cleanup_offload_action(&fl_action->action);

fl_err:
	kfree(fl_action);

	return err;
}

/* offload the tc action after it is inserted */
static int tcf_action_offload_add(struct tc_action *action,
				  struct netlink_ext_ack *extack)
{
	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
}

int tcf_action_update_hw_stats(struct tc_action *action)
{
	struct flow_offload_action fl_act = {};
	int err;

	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
	if (!err) {
		preempt_disable();
		tcf_action_stats_update(action, fl_act.stats.bytes,
					fl_act.stats.pkts,
					fl_act.stats.drops,
					fl_act.stats.lastused,
					true);
		preempt_enable();

		action->used_hw_stats = fl_act.stats.used_hw_stats;
		action->used_hw_stats_valid = true;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_action_update_hw_stats);
static int tcf_action_offload_del_ex(struct tc_action *action,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	struct flow_offload_action fl_act = {};
	u32 in_hw_count = 0;
	int err = 0;

	if (!tc_act_in_hw(action))
		return 0;

	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
	if (err < 0)
		return err;

	if (!cb && action->in_hw_count != in_hw_count)
		return -EINVAL;

	/* do not need to update hw state when deleting action */
	if (cb && in_hw_count)
		offload_action_hw_count_dec(action, in_hw_count);

	return 0;
}

static int tcf_action_offload_del(struct tc_action *action)
{
	return tcf_action_offload_del_ex(action, NULL, NULL);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	tcf_action_offload_del(p);
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and zero bind count can
	 * exist is when it was also created through the act API (unbinding
	 * the last classifier destroys an action that was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed via the
	 * act API while a classifier concurrently binds to an action with
	 * the same id. This results either in creation of a new action (same
	 * behavior as before), or in reuse of the existing action if the
	 * concurrent process increments the reference count before the
	 * action is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}

int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);

	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *user_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	user_cookie = rcu_dereference(act->user_cookie);
	if (user_cookie)
		cookie_len = nla_total_size(user_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_ACT_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_ACT_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->user_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		tcf_action_update_hw_stats(p);

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_ACT_KIND, ops->kind))
		goto nla_put_failure;

	ret = 0;
	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			break;
		n_i++;
	}
	mutex_unlock(&idrinfo->lock);
	if (ret < 0) {
		if (n_i)
			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
		else
			goto nla_put_failure;
	}

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops, extack);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
				struct netlink_callback *cb, int type,
				const struct tc_action_ops *ops,
				struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ops->net_id);

	if (unlikely(ops->walk))
		return ops->walk(net, skb, cb, type, ops, extack);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int __tcf_idr_search(struct net *net,
			    const struct tc_action_ops *ops,
			    struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ops->net_id);

	if (unlikely(ops->lookup))
		return ops->lookup(net, a, index);

	return tcf_idr_search(tn, a, index);
}

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	gnet_stats_basic_sync_init(&p->tcfa_bstats);
	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, false, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to actions flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If the action is
 * found, increment its reference and bind counters and return 1. Otherwise
 * insert a temporary error pointer (to prevent concurrent users from
 * inserting actions with the same index) and return 0.
 *
 * May return -EAGAIN for binding actions in case of a parallel add/delete on
 * the requested index.
 */
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;
	u32 max;

	if (*index) {
		rcu_read_lock();
		p = idr_find(&idrinfo->action_idr, *index);

		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			rcu_read_unlock();
			return -EAGAIN;
		}

		if (!p) {
			/* Empty slot, try to allocate it */
			max = *index;
			rcu_read_unlock();
			goto new;
		}

		if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
			/* Action was deleted in parallel */
			rcu_read_unlock();
			return -EAGAIN;
		}

		if (bind)
			atomic_inc(&p->tcfa_bindcnt);
		*a = p;

		rcu_read_unlock();

		return 1;
	} else {
		/* Find a slot */
		*index = 1;
		max = UINT_MAX;
	}

new:
	*a = NULL;

	mutex_lock(&idrinfo->lock);
	ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
			    GFP_KERNEL);
	mutex_unlock(&idrinfo->lock);

	/* N binds raced for action allocation,
	 * retry for all the ones that failed.
	 */
	if (ret == -ENOSPC && *index == max)
		ret = -EAGAIN;

	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
/* Since an action's pernet ops id lives in the pernet subsystem list, there
 * is no way to walk only the action subsystems, so we keep the tc action
 * pernet ops ids on a separate list for reoffload to walk.
 */
static LIST_HEAD(act_pernet_id_list);
static DEFINE_MUTEX(act_id_mutex);
struct tc_act_pernet_id {
	struct list_head list;
	unsigned int id;
};

static int tcf_pernet_add_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;
	int ret = 0;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			ret = -EEXIST;
			goto err_out;
		}
	}

	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
	if (!id_ptr) {
		ret = -ENOMEM;
		goto err_out;
	}
	id_ptr->id = id;

	list_add_tail(&id_ptr->list, &act_pernet_id_list);

err_out:
	mutex_unlock(&act_id_mutex);
	return ret;
}

static void tcf_pernet_del_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			list_del(&id_ptr->list);
			kfree(id_ptr);
			break;
		}
	}
	mutex_unlock(&act_id_mutex);
}

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * net structure.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	if (ops->id) {
		ret = tcf_pernet_add_id_list(*ops->id);
		if (ret)
			goto err_id;
	}

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			ret = -EEXIST;
			goto err_out;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;

err_out:
	write_unlock(&act_mod_lock);
	if (ops->id)
		tcf_pernet_del_id_list(*ops->id);
err_id:
	unregister_pernet_subsys(ops);
	return ret;
}
EXPORT_SYMBOL(tcf_register_action);
int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err) {
		unregister_pernet_subsys(ops);
		if (ops->id)
			tcf_pernet_del_id_list(*ops->id);
	}
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so jump counts go up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];
		int repeat_ttl;

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}

		if (tc_act_skip_sw(a->tcfa_flags))
			continue;

		repeat_ttl = 32;
repeat:
		ret = tc_act(skb, a, res);
		if (unlikely(ret == TC_ACT_REPEAT)) {
			if (--repeat_ttl != 0)
				goto repeat;
			/* suspicious opcode, stop pipeline */
			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
			ret = TC_ACT_OK;
		}
		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			}

			jmp_ttl -= 1;
			if (jmp_ttl > 0)
				goto restart_act_graph;
			else /* faulty graph, stop pipeline */
				return TC_ACT_OK;
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	tcf_act_for_each_action(i, a, actions) {
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

static void tcf_action_put_many(struct tc_action *actions[])
{
	struct tc_action *a;
	int i;

	tcf_act_for_each_action(i, a, actions) {
		const struct tc_action_ops *ops = a->ops;

		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

static void tca_put_bound_many(struct tc_action *actions[], int init_res[])
{
	struct tc_action *a;
	int i;

	tcf_act_for_each_action(i, a, actions) {
		const struct tc_action_ops *ops = a->ops;

		if (init_res[i] == ACT_P_CREATED)
			continue;

		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 flags;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
	if (flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       flags, flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	tcf_act_for_each_action(i, a, actions) {
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which enables all
	 * supported types.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
							TCA_ACT_FLAGS_SKIP_HW |
							TCA_ACT_FLAGS_SKIP_SW),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};
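/* Illustrative attribute layout (derived from the parsing code in this
 * file): a single RTM_NEWACTION request carries TCA_ACT_TAB, which nests
 * up to TCA_ACT_MAX_PRIO numbered entries, each validated against the
 * policy above:
 *
 *	TCA_ACT_TAB
 *	    1			(priority/order of the first action)
 *		TCA_ACT_KIND	"gact"
 *		TCA_ACT_INDEX	5
 *		TCA_ACT_OPTIONS	(kind-specific, parsed by the action's ->init())
 *	    2
 *		...
 */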
void tcf_idr_insert_many(struct tc_action *actions[], int init_res[])
{
	struct tc_action *a;
	int i;

	tcf_act_for_each_action(i, a, actions) {
		struct tcf_idrinfo *idrinfo;

		if (init_res[i] == ACT_P_BOUND)
			continue;

		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
					 struct netlink_ext_ack *extack)
{
	bool police = flags & TCA_ACT_FLAGS_POLICE;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strscpy(act_name, "police", IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);

		if (rtnl_held)
			rtnl_unlock();
		request_module(NET_ACT_ALIAS_PREFIX "%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    struct tc_action_ops *a_o, int *init_res,
				    u32 flags, struct netlink_ext_ack *extack)
{
	bool police = flags & TCA_ACT_FLAGS_POLICE;
	struct nla_bitfield32 userflags = { 0, 0 };
	struct tc_cookie *user_cookie = NULL;
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			user_cookie = nla_memdup_cookie(tb);
			if (!user_cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS]) {
			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
			if (!tc_act_flags_valid(userflags.value)) {
				err = -EINVAL;
				goto err_out;
			}
		}

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
				userflags.value | flags, extack);
	} else {
		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
				extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!police && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->user_cookie, user_cookie);

	if (!police)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (user_cookie) {
		kfree(user_cookie->data);
		kfree(user_cookie);
	}
	return ERR_PTR(err);
}

static bool tc_act_bind(u32 flags)
{
	return !!(flags & TCA_ACT_FLAGS_BIND);
}

/* Returns numbers of initialized actions or negative error. */
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, struct tc_action *actions[],
		    int init_res[], size_t *attr_size,
		    u32 flags, u32 fl_flags,
		    struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(tb[i], flags, extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
					&init_res[i - 1], flags, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
		if (tc_act_bind(flags)) {
			bool skip_sw = tc_skip_sw(fl_flags);
			bool skip_hw = tc_skip_hw(fl_flags);

			if (tc_act_bind(act->tcfa_flags))
				continue;
			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
				NL_SET_ERR_MSG(extack,
					       "Mismatch between action and filter offload flags");
				err = -EINVAL;
				goto err;
			}
		} else {
			err = tcf_action_offload_add(act, extack);
			if (tc_act_skip_sw(act->tcfa_flags) && err)
				goto err;
		}
	}

	/* We have to commit them all together, because if any error happened
	 * in between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions, init_res);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO && ops[i]; i++)
		module_put(ops[i]->owner);
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
				       bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);
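/* Illustrative sketch (an assumption, modeled on act_gact): an action's
 * ->stats_update() op typically folds driver-reported counters into the
 * shared stats through this helper:
 *
 *	static void tcf_foo_stats_update(struct tc_action *a, u64 bytes,
 *					 u64 packets, u64 drops,
 *					 u64 lastuse, bool hw)
 *	{
 *		struct tcf_t *tm = &a->tcfa_tm;
 *
 *		tcf_action_update_stats(a, bytes, packets, drops, hw);
 *		tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 *	}
 */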
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
				  &p->tcfa_bstats, false) < 0 ||
	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw, false) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref, struct netlink_ext_ack *extack)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
		goto out_nlmsg_trim;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1, NULL) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (__tcf_idr_search(net, ops, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	struct tc_action *a;
	int i;

	tcf_act_for_each_action(i, a, actions) {
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static struct sk_buff *tcf_reoffload_del_notify_msg(struct net *net,
						    struct tc_action *action)
{
	size_t attr_size = tcf_action_fill_size(action);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	struct sk_buff *skb;

	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOBUFS);

	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	return skb;
}

static int tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
{
	const struct tc_action_ops *ops = action->ops;
	struct sk_buff *skb;
	int ret;

	if (!rtnl_notify_needed(net, 0, RTNLGRP_TC)) {
		skb = NULL;
	} else {
		skb = tcf_reoffload_del_notify_msg(net, action);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
	}

	ret = tcf_idr_release_unsafe(action);
	if (ret == ACT_P_DELETED) {
		module_put(ops->owner);
		ret = rtnetlink_maybe_send(skb, net, 0, RTNLGRP_TC, 0);
	} else {
		kfree_skb(skb);
	}

	return ret;
}

int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
			    void *cb_priv, bool add)
{
	struct tc_act_pernet_id *id_ptr;
	struct tcf_idrinfo *idrinfo;
	struct tc_action_net *tn;
	struct tc_action *p;
	unsigned int act_id;
	unsigned long tmp;
	unsigned long id;
	struct idr *idr;
	struct net *net;
	int ret;

	if (!cb)
		return -EINVAL;

	down_read(&net_rwsem);
	mutex_lock(&act_id_mutex);

	for_each_net(net) {
		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
			act_id = id_ptr->id;
			tn = net_generic(net, act_id);
			if (!tn)
				continue;
			idrinfo = tn->idrinfo;
			if (!idrinfo)
				continue;

			mutex_lock(&idrinfo->lock);
			idr = &idrinfo->action_idr;
			idr_for_each_entry_ul(idr, p, tmp, id) {
				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
					continue;
				if (add) {
					tcf_action_offload_add_ex(p, NULL, cb,
								  cb_priv);
					continue;
				}

				/* cb unregister to update hw count */
				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
				if (ret < 0)
					continue;
				if (tc_act_skip_sw(p->tcfa_flags) &&
				    !tc_act_in_hw(p))
					tcf_reoffload_del_notify(net, p);
			}
			mutex_unlock(&idrinfo->lock);
		}
	}
	mutex_unlock(&act_id_mutex);
	up_read(&net_rwsem);

	return 0;
}
static struct sk_buff *tcf_del_notify_msg(struct net *net, struct nlmsghdr *n,
					  struct tc_action *actions[],
					  u32 portid, size_t attr_size,
					  struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOBUFS);

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	return skb;
}

static int tcf_del_notify(struct net *net, struct nlmsghdr *n,
			  struct tc_action *actions[], u32 portid,
			  size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int ret;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
		skb = NULL;
	} else {
		skb = tcf_del_notify_msg(net, n, actions, portid, attr_size,
					 extack);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
				    n->nlmsg_flags & NLM_F_ECHO);
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size,
				     extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static struct sk_buff *tcf_add_notify_msg(struct net *net, struct nlmsghdr *n,
					  struct tc_action *actions[],
					  u32 portid, size_t attr_size,
					  struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(max(attr_size, NLMSG_GOODSIZE), GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOBUFS);

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	return skb;
}

static int tcf_add_notify(struct net *net, struct nlmsghdr *n,
			  struct tc_action *actions[], u32 portid,
			  size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) {
		skb = NULL;
	} else {
		skb = tcf_add_notify_msg(net, n, actions, portid, attr_size,
					 extack);
		if (IS_ERR(skb))
			return PTR_ERR(skb);
	}

	return rtnetlink_maybe_send(skb, net, portid, RTNLGRP_TC,
				    n->nlmsg_flags & NLM_F_ECHO);
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, u32 flags,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
				      &attr_size, flags, 0, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;

	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put bound actions */
	tca_put_bound_many(actions, init_res);

	return ret;
}
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	u32 flags = 0;
	int ret = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that any other flag implies create
		 * only if it doesn't already exist. Note that CREATE | EXCL
		 * implies that, but since we want to avoid ambiguity (e.g.
		 * when flags is zero) we just set REPLACE explicitly.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			flags = TCA_ACT_FLAGS_REPLACE;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA])
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);