/*
 * net/sched/act_api.c	Packet action API.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

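/* A TC_ACT_GOTO_CHAIN action carries the index of its destination chain in
 * the low bits of tcfa_action. The helpers below take and drop a reference
 * on that chain and redirect classification to the chain's filter list.
 */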
static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

	if (!tp)
		return -EINVAL;
	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
	if (!a->goto_chain)
		return -ENOMEM;
	return 0;
}

static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put(a->goto_chain);
}

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Later readers cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);

	if (p->act_cookie) {
		kfree(p->act_cookie->data);
		kfree(p->act_cookie);
	}
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}

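/* Unlink the action from its per-netns IDR under the idrinfo lock, then
 * kill its rate estimator and free it.
 */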
static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
	spin_lock_bh(&idrinfo->lock);
	idr_remove(&idrinfo->action_idr, p->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

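/* Drop a reference (and, for bound callers, a bind count) on an action.
 * The action is destroyed once both counts reach zero, in which case
 * ACT_P_DELETED is returned; with @strict set, an unbind-less release of
 * a still-bound action is refused with -EPERM.
 */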
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	ASSERT_RTNL();

	if (p) {
		if (bind)
			p->tcfa_bindcnt--;
		else if (strict && p->tcfa_bindcnt > 0)
			return -EPERM;

		p->tcfa_refcnt--;
		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
			if (p->ops->cleanup)
				p->ops->cleanup(p);
			tcf_idr_remove(p->idrinfo, p);
			ret = ACT_P_DELETED;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);

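/* Dump actions from the IDR into @skb, resuming from the index saved in
 * cb->args[0] and honouring the large-dump flag and time-delta filter that
 * tc_dump_action() stashed in cb->args[2] and cb->args[3].
 */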
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	spin_lock_bh(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	spin_unlock_bh(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

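/* Flush all actions of one kind: release every entry in the IDR and report
 * the number of deleted actions to userspace as TCA_FCNT.
 */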
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

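/* Common walker used by action kinds: dispatch to the dump or delete
 * walker depending on the RTM message type.
 */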
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
{
	struct tc_action *p = NULL;

	spin_lock_bh(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	spin_unlock_bh(&idrinfo->lock);

	return p;
}

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (p) {
		*a = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_idr_search);

bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
		   int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (index && p) {
		if (bind)
			p->tcfa_bindcnt++;
		p->tcfa_refcnt++;
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_check);

void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est)
{
	if (est)
		gen_kill_estimator(&a->tcfa_rate_est);
	free_tcf(a);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

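/* Allocate and initialise a new action, reserve its index in the IDR (a
 * NULL slot is installed until tcf_idr_insert() makes the action visible),
 * and optionally set up per-CPU stats and a rate estimator.
 */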
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct idr *idr = &idrinfo->action_idr;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfa_refcnt = 1;
	if (bind)
		p->tcfa_bindcnt = 1;

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err2;
	}
	spin_lock_init(&p->tcfa_lock);
	idr_preload(GFP_KERNEL);
	spin_lock_bh(&idrinfo->lock);
	/* user doesn't specify an index */
	if (!index) {
		index = 1;
		err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
	} else {
		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
	}
	spin_unlock_bh(&idrinfo->lock);
	idr_preload_end();
	if (err)
		goto err3;

	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	INIT_LIST_HEAD(&p->list);
	*a = p;
	return 0;
err4:
	idr_remove(idr, index);
err3:
	free_percpu(p->cpu_qstats);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

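/* Replace the NULL slot reserved by tcf_idr_create() with the fully
 * initialised action, making it visible to lookups.
 */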
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	spin_lock_bh(&idrinfo->lock);
	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so a jump can skip at most 32 actions */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF

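/* A TC_ACT_JUMP opcode carries its jump offset in the low bits of the
 * return code; tcf_action_exec() below skips that many actions, using a
 * TTL to bound how many restarts a faulty action graph can trigger.
 */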
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct list_head *actions, int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (a->act_cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
			    a->act_cookie->data))
			goto nla_put_failure;
	}

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

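/* Instantiate a single action from its netlink attributes: resolve the
 * action kind (loading the act_<kind> module if necessary, in which case
 * -EAGAIN asks the caller to replay the request), call the kind's
 * ->init(), and attach any user-supplied cookie and goto_chain target.
 */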
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
		if (tb[TCA_ACT_COOKIE]) {
			int cklen = nla_len(tb[TCA_ACT_COOKIE]);

			if (cklen > TC_COOKIE_MAX_SIZE)
				goto err_out;

			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				err = -ENOMEM;
				goto err_out;
			}
		}
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind);
	if (err < 0)
		goto err_mod;

	if (name == NULL && tb[TCA_ACT_COOKIE]) {
		if (a->act_cookie) {
			kfree(a->act_cookie->data);
			kfree(a->act_cookie);
		}
		a->act_cookie = cookie;
	}

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
		err = tcf_action_goto_chain_init(a, tp);
		if (err) {
			LIST_HEAD(actions);

			list_add_tail(&a->list, &actions);
			tcf_action_destroy(&actions, bind);
			return ERR_PTR(err);
		}
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

static void cleanup_a(struct list_head *actions, int ovr)
{
	struct tc_action *a;

	if (!ovr)
		return;

	list_for_each_entry(a, actions, list)
		a->tcfa_refcnt--;
}

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct list_head *actions)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		if (ovr)
			act->tcfa_refcnt++;
		list_add_tail(&act->list, actions);
	}

	/* Remove the temp refcnt which was necessary to protect against
	 * destroying an existing action which was being replaced
	 */
	cleanup_a(actions, ovr);
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					   struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) /* could happen in batch of actions */
		goto err_out;
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0)
		goto err_mod;

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) /* someone is trying to flush an unknown action */
		goto err_out;

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
	if (err <= 0)
		goto out_module_put;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

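/* Handle RTM_GETACTION/RTM_DELACTION: look up each action referenced in
 * the request and either dump them back to the caller or delete them
 * (a flush with NLM_F_ROOT is handed off to tca_action_flush()).
 */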
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, portid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, &actions, event);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
		if (ret)
			goto err;
		return ret;
	}
err:
	if (event != RTM_GETACTION)
		tcf_action_destroy(&actions, 0);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr)
{
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions);
	if (ret)
		return ret;

	return tcf_add_notify(net, n, &actions, portid);
}

static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
			     .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
			  extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We assume that any flag combination other than NLM_F_REPLACE
		 * means "create only if it doesn't exist". CREATE | EXCL
		 * already implies that, but to avoid ambiguity (e.g. when the
		 * flags are zero) we honour only NLM_F_REPLACE explicitly.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

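/* Dig the TCA_ACT_KIND attribute of the first action out of a dump
 * request so the matching action ops can be looked up.
 */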
static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
			  tcaa_policy, NULL);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA])
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

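/* The remainder of this file implements the egress-device callback
 * registry: a per-netns hashtable of entries keyed by net_device, each
 * holding a list of hardware-offload callbacks to invoke for actions on
 * that device.
 */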
struct tcf_action_net {
	struct rhashtable egdev_ht;
};

static unsigned int tcf_action_net_id;

struct tcf_action_egdev_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_priv;
};

struct tcf_action_egdev {
	struct rhash_head ht_node;
	const struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params tcf_action_egdev_ht_params = {
	.key_offset = offsetof(struct tcf_action_egdev, dev),
	.head_offset = offsetof(struct tcf_action_egdev, ht_node),
	.key_len = sizeof(const struct net_device *),
};

static struct tcf_action_egdev *
tcf_action_egdev_lookup(const struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
				      tcf_action_egdev_ht_params);
}

static struct tcf_action_egdev *
tcf_action_egdev_get(const struct net_device *dev)
{
	struct tcf_action_egdev *egdev;
	struct tcf_action_net *tan;

	egdev = tcf_action_egdev_lookup(dev);
	if (egdev)
		goto inc_ref;

	egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
	if (!egdev)
		return NULL;
	INIT_LIST_HEAD(&egdev->cb_list);
	egdev->dev = dev;
	tan = net_generic(dev_net(dev), tcf_action_net_id);
	rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);

inc_ref:
	egdev->refcnt++;
	return egdev;
}

static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
{
	struct tcf_action_net *tan;

	if (--egdev->refcnt)
		return;
	tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
	rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);
	kfree(egdev);
}

static struct tcf_action_egdev_cb *
tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
			   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list)
		if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
			return egdev_cb;
	return NULL;
}

static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
				    enum tc_setup_type type,
				    void *type_data, bool err_stop)
{
	struct tcf_action_egdev_cb *egdev_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
		err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
				   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(egdev_cb))
		return -EEXIST;
	egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
	if (!egdev_cb)
		return -ENOMEM;
	egdev_cb->cb = cb;
	egdev_cb->cb_priv = cb_priv;
	list_add(&egdev_cb->list, &egdev->cb_list);
	return 0;
}

static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
				    tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(!egdev_cb))
		return;
	list_del(&egdev_cb->list);
	kfree(egdev_cb);
}

static int __tc_setup_cb_egdev_register(const struct net_device *dev,
					tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
	int err;

	if (!egdev)
		return -ENOMEM;
	err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
	if (err)
		goto err_cb_add;
	return 0;

err_cb_add:
	tcf_action_egdev_put(egdev);
	return err;
}

int tc_setup_cb_egdev_register(const struct net_device *dev,
			       tc_setup_cb_t *cb, void *cb_priv)
{
	int err;

	rtnl_lock();
	err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);

static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
					   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (WARN_ON(!egdev))
		return;
	tcf_action_egdev_cb_del(egdev, cb, cb_priv);
	tcf_action_egdev_put(egdev);
}

void tc_setup_cb_egdev_unregister(const struct net_device *dev,
				  tc_setup_cb_t *cb, void *cb_priv)
{
	rtnl_lock();
	__tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);

int tc_setup_cb_egdev_call(const struct net_device *dev,
			   enum tc_setup_type type, void *type_data,
			   bool err_stop)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (!egdev)
		return 0;
	return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);

static __net_init int tcf_action_net_init(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
}

static void __net_exit tcf_action_net_exit(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	rhashtable_destroy(&tan->egdev_ht);
}

static struct pernet_operations tcf_action_net_ops = {
	.init = tcf_action_net_init,
	.exit = tcf_action_net_exit,
	.id = &tcf_action_net_id,
	.size = sizeof(struct tcf_action_net),
};

static int __init tc_action_init(void)
{
	int err;

	err = register_pernet_subsys(&tcf_action_net_ops);
	if (err)
		return err;

	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);