// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */
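/* Usage sketch (illustrative, not part of the original file): tcindex is
 * typically paired with the dsmark qdisc, which stores the IP DS/TOS byte
 * in skb->tc_index. Assuming a hypothetical dsmark qdisc 1: on eth0, a
 * setup along these lines selects DSCP EF (TOS byte 0xb8, key 0x2e):
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 0x2e tcindex classid 1:1 pass_on
 *
 * Device, handles and class IDs above are made-up example values.
 */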
#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts exts;
	struct tcf_result res;
	struct tcindex_data *p;
	struct rcu_work rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};

struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}
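/* The classifier key is (skb->tc_index & mask) >> shift. For example,
 * with mask 0xfc and shift 2, a tc_index of 0xb8 (the TOS byte carrying
 * DSCP EF) yields (0xb8 & 0xfc) >> 2 = 0x2e, i.e. the DSCP value itself.
 * On a lookup miss with fall_through set, the key doubles as the minor
 * number of the class ID handed back to the qdisc.
 */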
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}
static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}
static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}
static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}
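/* Deletion comes in two shapes: a perfect-hash result stays in the array
 * and is merely deactivated (its class binding dropped), while an
 * imperfect-hash entry is unlinked from its bucket chain under RTNL and
 * freed only after an RCU grace period via the work items above.
 */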
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);
		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}
static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}
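/* A perfect hash must be able to index every reachable key. The largest
 * key after masking and shifting is (mask >> shift), so the table needs
 * strictly more slots than that: e.g. mask 0xf0 with shift 4 caps the key
 * at 15, making any hash size of at least 16 valid.
 */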
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};
static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	kfree(p->perfect);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}
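/* Update strategy: build a complete shadow copy (cp) of tcindex_data,
 * apply the netlink attributes to it, and only then publish it with
 * rcu_assign_pointer() so concurrent lookups never see a half-updated
 * configuration. The old copy is reclaimed after an RCU grace period.
 */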
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0; /* balloc: 1 = perfect hash allocated here,
			      * 2 = imperfect hash; drives errout_alloc */
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}
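	/* For illustration: the default mask 0xffff with shift 0 allows a
	 * maximum key of 0xffff, which is not below the threshold, so 64
	 * imperfect-hash buckets are used; mask 0xf0 with shift 4 caps the
	 * key at 15, so a 16-entry perfect hash is chosen instead.
	 */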
	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;

	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);
			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, *arg);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}
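/* The walker visits every active result: perfect-hash slots with a bound
 * class first, then every entry in the imperfect-hash chains, honouring
 * the skip/count bookkeeping shared by all classifier walks.
 */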
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}
static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}
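/* Dump either emits the per-instance parameters (hash/mask/shift/
 * fall_through) when fh is NULL, or one filter's classid and extensions.
 * For the imperfect hash the handle is not stored in the result, so it is
 * recovered by scanning the buckets for the matching entry.
 */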
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}
static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.bind_class	= tcindex_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");