/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
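/*
 * Illustrative example (not part of the original source): from user
 * space a filter of the kind this classifier evaluates is typically
 * installed with the tc utility, e.g.
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip dst 10.0.0.1/32 flowid 1:1
 *
 * which becomes a single key node holding one 32bit key/mask pair
 * matched against the IP header. The device and addresses here are
 * placeholders.
 */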
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
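/*
 * Core data structures: a tc_u_knode is one filter (a set of key/mask
 * pairs plus its result), chained into a bucket of a tc_u_hnode hash
 * table; all hash tables attached to one qdisc share a tc_u_common.
 */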
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};
struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	struct tc_u_knode	*ht[1];
};
struct tc_u_common
{
	struct tc_u_common	*next;
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};
static struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};
static struct tc_u_common *u32_list;
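/* Fold the 32bit word selected by the filter down to a bucket index:
 * mask out the hashed bits and shift them to bit 0. fshift is derived
 * from hmask when the filter is installed (see u32_change). */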
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = (key & sel->hmask)>>fshift;

	return h;
}
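/*
 * The classification walk: hash into a bucket of the current table,
 * compare each key node's key/mask pairs against the packet, and on a
 * match either return the node's result (TC_U32_TERMINAL) or descend
 * into its lower hash table. Visited nodes are pushed on a small stack
 * so the walk can backtrack; TC_U32_MAXDEPTH bounds the depth and
 * catches configuration loops.
 */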
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8		  *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb_network_header(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i>0; i--, key++) {

			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel, n->fshift);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb_tail_pointer(skb))
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}
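/*
 * A u32 handle encodes three fields: bits 20..31 identify the hash
 * table (TC_U32_HTID), bits 12..19 the bucket within it (TC_U32_HASH)
 * and bits 0..11 the key node (TC_U32_NODE). The lookup helpers below
 * resolve a handle accordingly: first the table, then the node.
 */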
static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}
static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}
static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}
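/* Pick an unused hash table id. Generated ids always carry the 0x800
 * bit in the 12bit htid field at the top of the handle; 0 is returned
 * when the id space is exhausted. */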
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
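/* All u32 filters attached to one qdisc share a single tc_u_common;
 * u32_init looks it up on u32_list (allocating it on first use) and
 * hangs a new root hash table off it. */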
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}
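/* Tear-down helpers: u32_clear_hnode empties every bucket of a hash
 * table, u32_destroy_hnode additionally unlinks the table from the
 * common list and frees it. */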
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht=tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}
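/* Choose a node id for a new key in the given table: one past the
 * highest id already present in the target bucket (at least 0x800),
 * saturating at 0xFFF. */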
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}
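/* Attributes shared by create and change requests: validate and attach
 * extensions (actions/policing), optionally link the node to a lower
 * hash table (TCA_U32_LINK), bind the classid and set the input device
 * match. */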
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct rtattr **tb,
			 struct rtattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_down = xchg(&n->ht_down, ht_down);
		tcf_tree_unlock(tp);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV-1]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
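/* u32_change serves three netlink cases: modifying an existing key
 * node in place, creating a new hash table (TCA_U32_DIVISOR present),
 * or building a new key node and inserting it, sorted by node id, into
 * the bucket selected by its handle. */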
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct rtattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
		return -EINVAL;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == 0 ||
	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	{
		u8 i = 0;
		u32 mask = s->hmask;
		if (mask) {
			while (!(mask & 1)) {
				i++;
				mask >>= 1;
			}
		}
		n->fshift = i;
	}

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK-1]) {
		struct tc_u32_mark *mark;

		if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
#ifdef CONFIG_CLS_U32_PERF
			kfree(n->pf);
#endif
			kfree(n);
			return -EINVAL;
		}
		mark = RTA_DATA(tb[TCA_U32_MARK-1]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}
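/* Walk every hash table and key node of this filter so dump/reset
 * callbacks can visit them, honouring the skip/count bookkeeping in
 * struct tcf_walker. */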
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
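/* Serialize a node back to netlink: hash tables dump their divisor
 * only; key nodes dump the selector, classid, downward link, mark and,
 * when enabled, the performance counters. */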
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev))
			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		RTA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	rta->rta_len = skb_tail_pointer(skb) - b;
	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops = {
	.next		=	NULL,
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}
static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");