// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */

struct route4_fastmap {
        struct route4_filter            *filter;
        u32                             id;
        int                             iif;
};

struct route4_head {
        struct route4_fastmap           fastmap[16];
        struct route4_bucket __rcu      *table[256 + 1];
        struct rcu_head                 rcu;
};

struct route4_bucket {
        /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
        struct route4_filter __rcu      *ht[16 + 16 + 1];
        struct rcu_head                 rcu;
};

struct route4_filter {
        struct route4_filter __rcu      *next;
        u32                     id;
        int                     iif;

        struct tcf_result       res;
        struct tcf_exts         exts;
        u32                     handle;
        struct route4_bucket    *bkt;
        struct tcf_proto        *tp;
        struct rcu_work         rwork;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

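/* Negative-lookup sentinel: route4_classify() caches this in the fastmap
 * after a full-table miss, so repeated lookups for the same (id, iif)
 * can fail fast without walking the buckets again.
 */
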
static inline int route4_fastmap_hash(u32 id, int iif)
{
        return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
        spin_lock_bh(&fastmap_lock);
        memset(head->fastmap, 0, sizeof(head->fastmap));
        spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
                   struct route4_filter *f)
{
        int h = route4_fastmap_hash(id, iif);

        /* fastmap updates must look atomic to align id, iif, filter */
        spin_lock_bh(&fastmap_lock);
        head->fastmap[h].id = id;
        head->fastmap[h].iif = iif;
        head->fastmap[h].filter = f;
        spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
        return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
        return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
        return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
        return 32;
}

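/* Apply a matching filter: copy its result, run any attached actions,
 * and, when the filter has no actions and caching was not vetoed,
 * record the hit in the fastmap.  A negative action verdict sets
 * dont_cache and moves on to the next filter in the chain.
 */
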
#define ROUTE4_APPLY_RESULT()                                   \
{                                                               \
        *res = f->res;                                          \
        if (tcf_exts_has_actions(&f->exts)) {                   \
                int r = tcf_exts_exec(skb, &f->exts, res);      \
                if (r < 0) {                                    \
                        dont_cache = 1;                         \
                        continue;                               \
                }                                               \
                return r;                                       \
        } else if (!dont_cache)                                 \
                route4_set_fastmap(head, id, iif, f);           \
        return 0;                                               \
}

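/* route4_classify() tries the fastmap first; on a miss it scans the
 * "to" bucket's FROM chain, then its IIF chain, then the wildcard
 * chain, and finally repeats the whole sequence on the "to ANY" row
 * (h == 256) before giving up.
 */
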
TC_INDIRECT_SCOPE int route4_classify(struct sk_buff *skb,
                                      const struct tcf_proto *tp,
                                      struct tcf_result *res)
{
        struct route4_head *head = rcu_dereference_bh(tp->root);
        struct dst_entry *dst;
        struct route4_bucket *b;
        struct route4_filter *f;
        u32 id, h;
        int iif, dont_cache = 0;

        dst = skb_dst(skb);
        if (!dst)
                goto failure;

        id = dst->tclassid;

        iif = inet_iif(skb);

        h = route4_fastmap_hash(id, iif);

        spin_lock(&fastmap_lock);
        if (id == head->fastmap[h].id &&
            iif == head->fastmap[h].iif &&
            (f = head->fastmap[h].filter) != NULL) {
                if (f == ROUTE4_FAILURE) {
                        spin_unlock(&fastmap_lock);
                        goto failure;
                }

                *res = f->res;
                spin_unlock(&fastmap_lock);
                return 0;
        }
        spin_unlock(&fastmap_lock);

        h = route4_hash_to(id);

restart:
        b = rcu_dereference_bh(head->table[h]);
        if (b) {
                for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        if (f->id == id)
                                ROUTE4_APPLY_RESULT();

                for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        if (f->iif == iif)
                                ROUTE4_APPLY_RESULT();

                for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        ROUTE4_APPLY_RESULT();
        }
        if (h < 256) {
                h = 256;
                id &= ~0xFFFF;
                goto restart;
        }

        if (!dont_cache)
                route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
        return -1;
}

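/* A filter handle packs the whole match into 32 bits:
 *   bits  0..15: "to" realm, or 0x8000 when no "to" was given
 *   bits 16..31: "from" realm, the iif id with bit 31 set,
 *                or 0xFFFF when neither was given
 * e.g. "route to 2 from 10" yields handle 0x000A0002.  to_hash() and
 * from_hash() below map the two halves to a table row and bucket slot.
 */
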
static inline u32 to_hash(u32 id)
{
        u32 h = id & 0xFF;

        if (id & 0x8000)
                h += 256;
        return h;
}

static inline u32 from_hash(u32 id)
{
        id &= 0xFFFF;
        if (id == 0xFFFF)
                return 32;
        if (!(id & 0x8000)) {
                if (id > 255)
                        return 256;
                return id & 0xF;
        }
        return 16 + (id & 0xF);
}

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_bucket *b;
        struct route4_filter *f;
        unsigned int h1, h2;

        h1 = to_hash(handle);
        if (h1 > 256)
                return NULL;

        h2 = from_hash(handle >> 16);
        if (h2 > 32)
                return NULL;

        b = rtnl_dereference(head->table[h1]);
        if (b) {
                for (f = rtnl_dereference(b->ht[h2]);
                     f;
                     f = rtnl_dereference(f->next))
                        if (f->handle == handle)
                                return f;
        }
        return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
        struct route4_head *head;

        head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        rcu_assign_pointer(tp->root, head);
        return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
        tcf_exts_destroy(&f->exts);
        tcf_exts_put_net(&f->exts);
        kfree(f);
}

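/* Deferred destruction: tcf_queue_work() frees the filter only after an
 * RCU grace period, so concurrent route4_classify() readers can never
 * dereference a filter that has already been freed.
 */
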
static void route4_delete_filter_work(struct work_struct *work)
{
        struct route4_filter *f = container_of(to_rcu_work(work),
                                               struct route4_filter,
                                               rwork);
        rtnl_lock();
        __route4_delete_filter(f);
        rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
        tcf_queue_work(&f->rwork, route4_delete_filter_work);
}

static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
                           struct netlink_ext_ack *extack)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        int h1, h2;

        if (head == NULL)
                return;

        for (h1 = 0; h1 <= 256; h1++) {
                struct route4_bucket *b;

                b = rtnl_dereference(head->table[h1]);
                if (b) {
                        for (h2 = 0; h2 <= 32; h2++) {
                                struct route4_filter *f;

                                while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
                                        struct route4_filter *next;

                                        next = rtnl_dereference(f->next);

                                        RCU_INIT_POINTER(b->ht[h2], next);
                                        tcf_unbind_filter(tp, &f->res);
                                        if (tcf_exts_get_net(&f->exts))
                                                route4_queue_work(f);
                                        else
                                                __route4_delete_filter(f);
                                }
                        }
                        RCU_INIT_POINTER(head->table[h1], NULL);
                        kfree_rcu(b, rcu);
                }
        }
        kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
                         bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_filter *f = arg;
        struct route4_filter __rcu **fp;
        struct route4_filter *nf;
        struct route4_bucket *b;
        unsigned int h = 0;
        int i, h1;

        if (!head || !f)
                return -EINVAL;

        h = f->handle;
        b = f->bkt;

        fp = &b->ht[from_hash(h >> 16)];
        for (nf = rtnl_dereference(*fp); nf;
             fp = &nf->next, nf = rtnl_dereference(*fp)) {
                if (nf == f) {
                        /* unlink it */
                        RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

                        /* Remove any fastmap entries that might reference
                         * the filter; it is already unlinked, so it cannot
                         * get back into the fastmap.
                         */
                        route4_reset_fastmap(head);

                        /* Delete it */
                        tcf_unbind_filter(tp, &f->res);
                        tcf_exts_get_net(&f->exts);
                        tcf_queue_work(&f->rwork, route4_delete_filter_work);

                        /* Strip RTNL protected tree */
                        for (i = 0; i <= 32; i++) {
                                struct route4_filter *rt;

                                rt = rtnl_dereference(b->ht[i]);
                                if (rt)
                                        goto out;
                        }

                        /* OK, session has no flows */
                        RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
                        kfree_rcu(b, rcu);
                        break;
                }
        }

out:
        *last = true;
        for (h1 = 0; h1 <= 256; h1++) {
                if (rcu_access_pointer(head->table[h1])) {
                        *last = false;
                        break;
                }
        }

        return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
        [TCA_ROUTE4_CLASSID]    = { .type = NLA_U32 },
        [TCA_ROUTE4_TO]         = NLA_POLICY_MAX(NLA_U32, 0xFF),
        [TCA_ROUTE4_FROM]       = NLA_POLICY_MAX(NLA_U32, 0xFF),
        [TCA_ROUTE4_IIF]        = NLA_POLICY_MAX(NLA_U32, 0x7FFF),
};

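/* Realms fit in 8 bits (see assumption 1 above); iif ids are capped at
 * 15 bits so that bit 15 of the upper handle half can flag "fromdev".
 */
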
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
                            unsigned long base, struct route4_filter *f,
                            u32 handle, struct route4_head *head,
                            struct nlattr **tb, struct nlattr *est, int new,
                            u32 flags, struct netlink_ext_ack *extack)
{
        u32 id = 0, to = 0, nhandle = 0x8000;
        struct route4_filter *fp;
        unsigned int h1;
        struct route4_bucket *b;
        int err;

        err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
        if (err < 0)
                return err;

        if (tb[TCA_ROUTE4_TO]) {
                if (new && handle & 0x8000) {
                        NL_SET_ERR_MSG(extack, "Invalid handle");
                        return -EINVAL;
                }
                to = nla_get_u32(tb[TCA_ROUTE4_TO]);
                nhandle = to;
        }

        if (tb[TCA_ROUTE4_FROM] && tb[TCA_ROUTE4_IIF]) {
                NL_SET_ERR_MSG_ATTR(extack, tb[TCA_ROUTE4_FROM],
                                    "'from' and 'fromif' are mutually exclusive");
                return -EINVAL;
        }

        if (tb[TCA_ROUTE4_FROM]) {
                id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
                nhandle |= id << 16;
        } else if (tb[TCA_ROUTE4_IIF]) {
                id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
                nhandle |= (id | 0x8000) << 16;
        } else
                nhandle |= 0xFFFF << 16;

        if (handle && new) {
                nhandle |= handle & 0x7F00;
                if (nhandle != handle) {
                        NL_SET_ERR_MSG_FMT(extack,
                                           "Handle mismatch constructed: %x (expected: %x)",
                                           handle, nhandle);
                        return -EINVAL;
                }
        }

        if (!nhandle) {
                NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
                return -EINVAL;
        }

        h1 = to_hash(nhandle);
        b = rtnl_dereference(head->table[h1]);
        if (!b) {
                b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
                if (b == NULL)
                        return -ENOBUFS;

                rcu_assign_pointer(head->table[h1], b);
        } else {
                unsigned int h2 = from_hash(nhandle >> 16);

                for (fp = rtnl_dereference(b->ht[h2]);
                     fp;
                     fp = rtnl_dereference(fp->next))
                        if (fp->handle == f->handle)
                                return -EEXIST;
        }

        if (tb[TCA_ROUTE4_TO])
                f->id = to;

        if (tb[TCA_ROUTE4_FROM])
                f->id = to | id << 16;
        else if (tb[TCA_ROUTE4_IIF])
                f->iif = id;

        f->handle = nhandle;
        f->bkt = b;
        f->tp = tp;

        if (tb[TCA_ROUTE4_CLASSID]) {
                f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
                tcf_bind_filter(tp, &f->res, base);
        }

        return 0;
}

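/* Illustrative usage (iproute2 syntax; device and classid are examples):
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *           route to 2 from 10 classid 1:10
 *
 * parses into TCA_ROUTE4_TO = 2 and TCA_ROUTE4_FROM = 10, which the
 * code above packs into handle 0x000A0002.
 */
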
static int route4_change(struct net *net, struct sk_buff *in_skb,
                         struct tcf_proto *tp, unsigned long base, u32 handle,
                         struct nlattr **tca, void **arg, u32 flags,
                         struct netlink_ext_ack *extack)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_filter __rcu **fp;
        struct route4_filter *fold, *f1, *pfp, *f = NULL;
        struct route4_bucket *b;
        struct nlattr *tb[TCA_ROUTE4_MAX + 1];
        unsigned int h, th;
        int err;
        bool new = true;

        if (!handle) {
                NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
                return -EINVAL;
        }

        if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
                NL_SET_ERR_MSG_MOD(extack, "Missing options");
                return -EINVAL;
        }

        err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, tca[TCA_OPTIONS],
                                          route4_policy, NULL);
        if (err < 0)
                return err;

        fold = *arg;
        if (fold && fold->handle != handle)
                return -EINVAL;

        err = -ENOBUFS;
        f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
        if (!f)
                goto errout;

        err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
        if (err < 0)
                goto errout;

        if (fold) {
                f->id = fold->id;
                f->iif = fold->iif;
                f->handle = fold->handle;

                f->tp = fold->tp;
                f->bkt = fold->bkt;
                new = false;
        }

        err = route4_set_parms(net, tp, base, f, handle, head, tb,
                               tca[TCA_RATE], new, flags, extack);
        if (err < 0)
                goto errout;

        h = from_hash(f->handle >> 16);
        fp = &f->bkt->ht[h];
        for (pfp = rtnl_dereference(*fp);
             (f1 = rtnl_dereference(*fp)) != NULL;
             fp = &f1->next)
                if (f->handle < f1->handle)
                        break;

        tcf_block_netif_keep_dst(tp->chain->block);
        rcu_assign_pointer(f->next, f1);
        rcu_assign_pointer(*fp, f);

        if (fold) {
                th = to_hash(fold->handle);
                h = from_hash(fold->handle >> 16);
                b = rtnl_dereference(head->table[th]);
                if (b) {
                        fp = &b->ht[h];
                        for (pfp = rtnl_dereference(*fp); pfp;
                             fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
                                if (pfp == fold) {
                                        rcu_assign_pointer(*fp, fold->next);
                                        break;
                                }
                        }
                }
        }

        route4_reset_fastmap(head);
        *arg = f;
        if (fold) {
                tcf_unbind_filter(tp, &fold->res);
                tcf_exts_get_net(&fold->exts);
                tcf_queue_work(&fold->rwork, route4_delete_filter_work);
        }
        return 0;

errout:
        if (f)
                tcf_exts_destroy(&f->exts);
        kfree(f);
        return err;
}

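/* Note that replacement in route4_change() is make-before-break: the new
 * filter is linked in before the old one is unlinked and queued for
 * RCU-deferred freeing, so classify() always sees one of the two.
 */
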
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                        bool rtnl_held)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        unsigned int h, h1;

        if (head == NULL || arg->stop)
                return;

        for (h = 0; h <= 256; h++) {
                struct route4_bucket *b = rtnl_dereference(head->table[h]);

                if (b) {
                        for (h1 = 0; h1 <= 32; h1++) {
                                struct route4_filter *f;

                                for (f = rtnl_dereference(b->ht[h1]);
                                     f;
                                     f = rtnl_dereference(f->next)) {
                                        if (!tc_cls_stats_dump(tp, arg, f))
                                                return;
                                }
                        }
                }
        }
}

static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
                       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct route4_filter *f = fh;
        struct nlattr *nest;
        u32 id;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (!(f->handle & 0x8000)) {
                id = f->id & 0xFF;
                if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
                        goto nla_put_failure;
        }
        if (f->handle & 0x80000000) {
                if ((f->handle >> 16) != 0xFFFF &&
                    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
                        goto nla_put_failure;
        } else {
                id = f->id >> 16;
                if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
                        goto nla_put_failure;
        }
        if (f->res.classid &&
            nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &f->exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
                              unsigned long base)
{
        struct route4_filter *f = fh;

        tc_cls_bind_class(classid, cl, q, &f->res, base);
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
        .kind           =       "route",
        .classify       =       route4_classify,
        .init           =       route4_init,
        .destroy        =       route4_destroy,
        .get            =       route4_get,
        .change         =       route4_change,
        .delete         =       route4_delete,
        .walk           =       route4_walk,
        .dump           =       route4_dump,
        .bind_class     =       route4_bind_class,
        .owner          =       THIS_MODULE,
};
MODULE_ALIAS_NET_CLS("route");

static int __init init_route4(void)
{
        return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
        unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_DESCRIPTION("Routing table realm based TC classifier");
MODULE_LICENSE("GPL");