/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
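/*
 * Editor's note on the handle layout used throughout this file
 * (derived from the TC_U32_HTID, TC_U32_HASH, TC_U32_NODE and
 * TC_U32_KEY macros in <linux/pkt_cls.h>): the top 12 bits of a
 * 32bit handle select the hash table (htid), the next 8 bits select
 * the bucket within that table, and the low 12 bits select the key
 * node within the bucket.
 *
 * As an illustrative (not normative) userspace example, a two-level
 * setup might be built with iproute2 roughly like:
 *
 *	tc filter add dev eth0 parent 1:0 prio 5 protocol ip \
 *		handle 2: u32 divisor 256
 *	tc filter add dev eth0 parent 1:0 prio 5 protocol ip u32 \
 *		ht 2:6e: match ip src 1.2.3.4/32 flowid 1:1
 */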
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};
struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	struct tc_u_knode	*ht[1];
};
struct tc_u_common
{
	struct tc_u_common	*next;
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};
static const struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

static struct tc_u_common *u32_list;
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = ntohl(key & sel->hmask)>>fshift;

	return h;
}
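/*
 * Core lookup: walk the bucket chain of the current hash table,
 * comparing each node's key/mask pairs against the packet at the
 * current offset.  A match either terminates with a classification
 * result or descends into a linked lower-level table (PUSH);
 * exhausted chains backtrack through the explicit stack (POP).
 * TC_U32_MAXDEPTH bounds the descent so a mis-programmed cycle of
 * tables cannot loop forever.
 */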
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8		  *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb_network_header(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt +=1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i>0; i--, key++) {

			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] +=1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit +=1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel, n->fshift);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb_tail_pointer(skb))
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}
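/* The lookup helpers below run from the configuration paths (under
 * the RTNL lock); the lists are typically short, so plain linear
 * searches are used with no extra index structure. */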
static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}
static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}
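/* Allocate an unused hash-table id.  Candidates are drawn from
 * hgenerator (which wraps at 0x7FF back to 1) with bit 0x800 forced
 * on, then shifted into the top 12 bits of the handle; up to 0x800
 * candidates are probed before giving up and returning 0. */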
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
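/* One tc_u_common is shared by all u32 chains attached to the same
 * qdisc: u32_init either finds it on u32_list or allocates it, then
 * hangs a fresh root hash table (divisor 0) off it. */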
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}
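/* Teardown helpers: a key node drops the reference it holds on any
 * lower-level table it links to, and its per-key perf counters are
 * freed with it. */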
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht=tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}
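/* Pick a node id for a new key in the target bucket: one past the
 * largest id already present, starting no lower than 0x800 and
 * capped at the 12-bit maximum of 0xFFF. */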
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};
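/* Apply the netlink attributes shared by the create and change
 * paths: actions/policing via tcf_exts, an optional link to a
 * lower-level hash table (TCA_U32_LINK), the target class, and the
 * ingress-device match.  The swap of a replaced link table is done
 * under the tree lock before its old reference is dropped. */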
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_down = xchg(&n->ht_down, ht_down);
		tcf_tree_unlock(tp);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
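/* Create or update an element.  Three cases, in order: a non-NULL
 * *arg means an existing key node is changed in place;
 * TCA_U32_DIVISOR creates a new hash table; otherwise a new key node
 * is built and spliced into its bucket in handle order, with a write
 * barrier before it becomes visible to the lockless classify path. */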
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}

#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}
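/* Visit every hash table of this filter's priority and every key
 * node within it, honouring the walker's skip/count bookkeeping. */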
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
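/* Dump one element back to userspace: hash tables report only their
 * divisor; key nodes report selector, classid, links, mark and
 * (when compiled in) per-key hit counters. */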
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
	} else {
		NLA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			NLA_PUT_U32(skb, TCA_U32_HASH, htid);
		}
		if (n->res.classid)
			NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
		if (n->ht_down)
			NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev))
			NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		NLA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");