// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
#endif

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);
struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Later readers cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}
static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through act API
	 * interface (classifiers always bind). The only case when an action
	 * with positive reference count and zero bind count can exist is when
	 * it was also created with act API (unbinding the last classifier
	 * destroys the action if it was created by a classifier). So the only
	 * case when the bind count can change after the initial check is when
	 * an unbound action is destroyed by act API while a classifier binds
	 * to an action with the same id concurrently. This results either in
	 * creation of a new action (same behavior as before), or in reuse of
	 * the existing action if the concurrent process increments the
	 * reference count before the action is deleted. Both scenarios are
	 * acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
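
/* For reference: action modules normally reach this through the
 * tcf_idr_release() helper. A minimal sketch, assuming the inline wrapper in
 * include/net/act_api.h looks as it does in kernels of this vintage:
 *
 *	static inline int tcf_idr_release(struct tc_action *a, bool bind)
 *	{
 *		return __tcf_idr_release(a, bind, false);
 *	}
 */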
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}
static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			mutex_unlock(&idrinfo->lock);
			goto nla_put_failure;
		}
	}
	mutex_unlock(&idrinfo->lock);

	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
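
/* A typical .walk callback just resolves its per-netns tc_action_net and
 * delegates here. A minimal sketch, modeled on act_gact (gact_net_id is that
 * module's pernet id, not defined in this file):
 *
 *	static int tcf_gact_walker(struct net *net, struct sk_buff *skb,
 *				   struct netlink_callback *cb, int type,
 *				   const struct tc_action_ops *ops,
 *				   struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, gact_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 */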
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);
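
/* Likewise, a module's .lookup callback is usually a one-line delegation to
 * tcf_idr_search(). A sketch in the same act_gact style (gact_net_id again
 * assumed to be that module's pernet id):
 *
 *	static int tcf_gact_search(struct net *net, struct tc_action **a,
 *				   u32 index)
 *	{
 *		struct tc_action_net *tn = net_generic(net, gact_net_id);
 *
 *		return tcf_idr_search(tn, a, index);
 *	}
 */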
static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);
int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to actions flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
/* Check if action with specified index exists. If action is found, increments
 * its reference and bind counters, and returns 1. Otherwise inserts a
 * temporary error pointer (to prevent concurrent users from inserting actions
 * with the same index) and returns 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
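
/* How an action's .init callback is expected to combine the helpers above; a
 * hedged sketch of the common pattern (tn, index, est, a, bind, ovr and flags
 * come from the caller's context, my_act_ops is hypothetical; tcf_idr_release()
 * is the inline from include/net/act_api.h):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {			// index was free and is now reserved
 *		ret = tcf_idr_create(tn, index, est, a, &my_act_ops,
 *				     bind, false, flags);
 *		if (ret) {
 *			tcf_idr_cleanup(tn, index); // drop ERR_PTR(-EBUSY)
 *			return ret;
 *		}
 *		ret = ACT_P_CREATED;	// committed later by tcf_idr_insert_many()
 *	} else if (err > 0) {		// existing action, reference taken
 *		if (bind)
 *			return 0;
 *		if (!ovr) {
 *			tcf_idr_release(*a, bind);
 *			return -EEXIST;
 *		}
 *	} else {
 *		return err;
 *	}
 */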
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);
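
/* Registration is typically done from a module init with pernet_operations
 * that embed struct tc_action_net. A minimal sketch modeled on act_gact
 * (all gact_* symbols belong to that module, not this file):
 *
 *	static struct pernet_operations gact_net_ops = {
 *		.init		= gact_init_net,
 *		.exit_batch	= gact_exit_net,
 *		.id		= &gact_net_id,
 *		.size		= sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init gact_init_module(void)
 *	{
 *		return tcf_register_action(&act_gact_ops, &gact_net_ops);
 *	}
 */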
int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
/* TCA_ACT_MAX_PRIO is 32, so a jump can skip at most 32 actions */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
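
/* Classifiers reach this through tcf_exts_exec(); a sketch of that inline as
 * it appears in include/net/pkt_cls.h around this kernel version:
 *
 *	static inline int tcf_exts_exec(struct sk_buff *skb,
 *					struct tcf_exts *exts,
 *					struct tcf_result *res)
 *	{
 *	#ifdef CONFIG_NET_CLS_ACT
 *		return tcf_action_exec(skb, exts->actions, exts->nr_actions,
 *				       res);
 *	#endif
 *		return TC_ACT_OK;
 *	}
 */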
int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}
static int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->tcfa_flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       a->tcfa_flags, a->tcfa_flags))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the stats type. Return "any" in that case, which selects
	 * all supported types.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}
static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

static void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * it was just created, otherwise this is just a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			goto err_out;
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			goto err_out;
		}
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS])
			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			err = -EINVAL;
			goto err_out;
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		err = -ENOENT;
		goto err_free;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, tp, flags.value, extack);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				tp, flags.value, extack);
	if (err < 0)
		goto err_mod;

	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!name)
		a->hw_stats = hw_stats;

	/* The module count goes up only when a brand new policy is created;
	 * if one exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	return a;

err_mod:
	module_put(a_o->owner);
err_free:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
err_out:
	return ERR_PTR(err);
}
/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct tc_action *actions[], size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					rtnl_held, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
	}

	/* We have to commit them all together, because if any error happened
	 * in between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	return i - 1;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
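
/* For orientation, the classifier-side caller looks roughly like this
 * (a sketch modeled on tcf_exts_validate() in net/sched/cls_api.c of this
 * era; the tcf_exts fields are that code's, not defined here):
 *
 *	err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, NULL,
 *			      ovr, TCA_ACT_BIND, exts->actions, &attr_size,
 *			      rtnl_held, extack);
 *	if (err < 0)
 *		return err;
 *	exts->nr_actions = err;
 */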
void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
					   bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);
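
/* Driver-reported counters funnel in through each action's .stats_update
 * callback. A sketch modeled on act_gact's tcf_gact_stats_update() (the
 * tcf_gact/to_gact types and macros are that module's, not defined here):
 *
 *	static void tcf_gact_stats_update(struct tc_action *a, u64 bytes,
 *					  u64 packets, u64 drops, u64 lastuse,
 *					  bool hw)
 *	{
 *		struct tcf_gact *gact = to_gact(a);
 *		struct tcf_t *tm = &gact->tcf_tm;
 *
 *		tcf_action_update_stats(a, bytes, packets, drops, hw);
 *		tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 *	}
 */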
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in a batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}
static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
				      actions, &attr_size, true, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
	if (ovr)
		tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We assume all other flags imply "create only if it doesn't
		 * exist". Note that CREATE | EXCL implies that too, but since
		 * we want to avoid ambiguity (e.g. when flags is zero) we
		 * only treat NLM_F_REPLACE as an explicit override.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}
static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);