/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/skbuff.h>
19 #include <net/route.h>
20 #include <net/netlink.h>
21 #include <net/act_api.h>
22 #include <net/pkt_cls.h>
/*
 * NOTES:
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
34 struct route4_filter
*filter
;
41 struct route4_fastmap fastmap
[16];
42 struct route4_bucket
*table
[256+1];
47 /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
48 struct route4_filter
*ht
[16+16+1];
53 struct route4_filter
*next
;
57 struct tcf_result res
;
60 struct route4_bucket
*bkt
;
/* Sentinel filter pointer cached in the fastmap to record a negative
 * (failed) lookup, so repeated misses also skip the full table walk. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
65 static struct tcf_ext_map route_ext_map
= {
66 .police
= TCA_ROUTE4_POLICE
,
67 .action
= TCA_ROUTE4_ACT
70 static __inline__
int route4_fastmap_hash(u32 id
, int iif
)
76 void route4_reset_fastmap(struct net_device
*dev
, struct route4_head
*head
, u32 id
)
79 memset(head
->fastmap
, 0, sizeof(head
->fastmap
));
80 qdisc_unlock_tree(dev
);
84 route4_set_fastmap(struct route4_head
*head
, u32 id
, int iif
,
85 struct route4_filter
*f
)
87 int h
= route4_fastmap_hash(id
, iif
);
88 head
->fastmap
[h
].id
= id
;
89 head
->fastmap
[h
].iif
= iif
;
90 head
->fastmap
[h
].filter
= f
;
93 static __inline__
int route4_hash_to(u32 id
)
98 static __inline__
int route4_hash_from(u32 id
)
/*
 * Hash the input interface index into one of the 16 IIF slots
 * (indices 16..31) of a route4_bucket's filter table.  Uses bits
 * 16..19 of iif.  Formatting reconstructed — the extraction had
 * dropped the function braces.
 */
static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}
108 static __inline__
int route4_hash_wild(void)
113 #define ROUTE4_APPLY_RESULT() \
116 if (tcf_exts_is_available(&f->exts)) { \
117 int r = tcf_exts_exec(skb, &f->exts, res); \
123 } else if (!dont_cache) \
124 route4_set_fastmap(head, id, iif, f); \
128 static int route4_classify(struct sk_buff
*skb
, struct tcf_proto
*tp
,
129 struct tcf_result
*res
)
131 struct route4_head
*head
= (struct route4_head
*)tp
->root
;
132 struct dst_entry
*dst
;
133 struct route4_bucket
*b
;
134 struct route4_filter
*f
;
136 int iif
, dont_cache
= 0;
138 if ((dst
= skb
->dst
) == NULL
)
145 iif
= ((struct rtable
*)dst
)->fl
.iif
;
147 h
= route4_fastmap_hash(id
, iif
);
148 if (id
== head
->fastmap
[h
].id
&&
149 iif
== head
->fastmap
[h
].iif
&&
150 (f
= head
->fastmap
[h
].filter
) != NULL
) {
151 if (f
== ROUTE4_FAILURE
)
158 h
= route4_hash_to(id
);
161 if ((b
= head
->table
[h
]) != NULL
) {
162 for (f
= b
->ht
[route4_hash_from(id
)]; f
; f
= f
->next
)
164 ROUTE4_APPLY_RESULT();
166 for (f
= b
->ht
[route4_hash_iif(iif
)]; f
; f
= f
->next
)
168 ROUTE4_APPLY_RESULT();
170 for (f
= b
->ht
[route4_hash_wild()]; f
; f
= f
->next
)
171 ROUTE4_APPLY_RESULT();
181 route4_set_fastmap(head
, id
, iif
, ROUTE4_FAILURE
);
186 if (id
&& (TC_H_MAJ(id
) == 0 ||
187 !(TC_H_MAJ(id
^tp
->q
->handle
)))) {
195 static inline u32
to_hash(u32 id
)
203 static inline u32
from_hash(u32 id
)
208 if (!(id
& 0x8000)) {
213 return 16 + (id
&0xF);
216 static unsigned long route4_get(struct tcf_proto
*tp
, u32 handle
)
218 struct route4_head
*head
= (struct route4_head
*)tp
->root
;
219 struct route4_bucket
*b
;
220 struct route4_filter
*f
;
226 h1
= to_hash(handle
);
230 h2
= from_hash(handle
>>16);
234 if ((b
= head
->table
[h1
]) != NULL
) {
235 for (f
= b
->ht
[h2
]; f
; f
= f
->next
)
236 if (f
->handle
== handle
)
237 return (unsigned long)f
;
242 static void route4_put(struct tcf_proto
*tp
, unsigned long f
)
246 static int route4_init(struct tcf_proto
*tp
)
252 route4_delete_filter(struct tcf_proto
*tp
, struct route4_filter
*f
)
254 tcf_unbind_filter(tp
, &f
->res
);
255 tcf_exts_destroy(tp
, &f
->exts
);
259 static void route4_destroy(struct tcf_proto
*tp
)
261 struct route4_head
*head
= xchg(&tp
->root
, NULL
);
267 for (h1
=0; h1
<=256; h1
++) {
268 struct route4_bucket
*b
;
270 if ((b
= head
->table
[h1
]) != NULL
) {
271 for (h2
=0; h2
<=32; h2
++) {
272 struct route4_filter
*f
;
274 while ((f
= b
->ht
[h2
]) != NULL
) {
276 route4_delete_filter(tp
, f
);
285 static int route4_delete(struct tcf_proto
*tp
, unsigned long arg
)
287 struct route4_head
*head
= (struct route4_head
*)tp
->root
;
288 struct route4_filter
**fp
, *f
= (struct route4_filter
*)arg
;
290 struct route4_bucket
*b
;
299 for (fp
= &b
->ht
[from_hash(h
>>16)]; *fp
; fp
= &(*fp
)->next
) {
305 route4_reset_fastmap(tp
->q
->dev
, head
, f
->id
);
306 route4_delete_filter(tp
, f
);
310 for (i
=0; i
<=32; i
++)
314 /* OK, session has no flows */
316 head
->table
[to_hash(h
)] = NULL
;
326 static int route4_set_parms(struct tcf_proto
*tp
, unsigned long base
,
327 struct route4_filter
*f
, u32 handle
, struct route4_head
*head
,
328 struct rtattr
**tb
, struct rtattr
*est
, int new)
331 u32 id
= 0, to
= 0, nhandle
= 0x8000;
332 struct route4_filter
*fp
;
334 struct route4_bucket
*b
;
337 err
= tcf_exts_validate(tp
, tb
, est
, &e
, &route_ext_map
);
342 if (tb
[TCA_ROUTE4_CLASSID
-1])
343 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_CLASSID
-1]) < sizeof(u32
))
346 if (tb
[TCA_ROUTE4_TO
-1]) {
347 if (new && handle
& 0x8000)
349 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_TO
-1]) < sizeof(u32
))
351 to
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_TO
-1]);
357 if (tb
[TCA_ROUTE4_FROM
-1]) {
358 if (tb
[TCA_ROUTE4_IIF
-1])
360 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_FROM
-1]) < sizeof(u32
))
362 id
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_FROM
-1]);
366 } else if (tb
[TCA_ROUTE4_IIF
-1]) {
367 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_IIF
-1]) < sizeof(u32
))
369 id
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_IIF
-1]);
372 nhandle
|= (id
| 0x8000) << 16;
374 nhandle
|= 0xFFFF << 16;
377 nhandle
|= handle
& 0x7F00;
378 if (nhandle
!= handle
)
382 h1
= to_hash(nhandle
);
383 if ((b
= head
->table
[h1
]) == NULL
) {
385 b
= kzalloc(sizeof(struct route4_bucket
), GFP_KERNEL
);
393 unsigned int h2
= from_hash(nhandle
>> 16);
395 for (fp
= b
->ht
[h2
]; fp
; fp
= fp
->next
)
396 if (fp
->handle
== f
->handle
)
401 if (tb
[TCA_ROUTE4_TO
-1])
404 if (tb
[TCA_ROUTE4_FROM
-1])
406 else if (tb
[TCA_ROUTE4_IIF
-1])
413 if (tb
[TCA_ROUTE4_CLASSID
-1]) {
414 f
->res
.classid
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_CLASSID
-1]);
415 tcf_bind_filter(tp
, &f
->res
, base
);
418 tcf_exts_change(tp
, &f
->exts
, &e
);
422 tcf_exts_destroy(tp
, &e
);
426 static int route4_change(struct tcf_proto
*tp
, unsigned long base
,
431 struct route4_head
*head
= tp
->root
;
432 struct route4_filter
*f
, *f1
, **fp
;
433 struct route4_bucket
*b
;
434 struct rtattr
*opt
= tca
[TCA_OPTIONS
-1];
435 struct rtattr
*tb
[TCA_ROUTE4_MAX
];
441 return handle
? -EINVAL
: 0;
443 if (rtattr_parse_nested(tb
, TCA_ROUTE4_MAX
, opt
) < 0)
446 if ((f
= (struct route4_filter
*)*arg
) != NULL
) {
447 if (f
->handle
!= handle
&& handle
)
451 old_handle
= f
->handle
;
453 err
= route4_set_parms(tp
, base
, f
, handle
, head
, tb
,
463 head
= kzalloc(sizeof(struct route4_head
), GFP_KERNEL
);
472 f
= kzalloc(sizeof(struct route4_filter
), GFP_KERNEL
);
476 err
= route4_set_parms(tp
, base
, f
, handle
, head
, tb
,
482 h
= from_hash(f
->handle
>> 16);
483 for (fp
= &f
->bkt
->ht
[h
]; (f1
=*fp
) != NULL
; fp
= &f1
->next
)
484 if (f
->handle
< f1
->handle
)
491 if (old_handle
&& f
->handle
!= old_handle
) {
492 th
= to_hash(old_handle
);
493 h
= from_hash(old_handle
>> 16);
494 if ((b
= head
->table
[th
]) != NULL
) {
495 for (fp
= &b
->ht
[h
]; *fp
; fp
= &(*fp
)->next
) {
505 route4_reset_fastmap(tp
->q
->dev
, head
, f
->id
);
506 *arg
= (unsigned long)f
;
514 static void route4_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
)
516 struct route4_head
*head
= tp
->root
;
525 for (h
= 0; h
<= 256; h
++) {
526 struct route4_bucket
*b
= head
->table
[h
];
529 for (h1
= 0; h1
<= 32; h1
++) {
530 struct route4_filter
*f
;
532 for (f
= b
->ht
[h1
]; f
; f
= f
->next
) {
533 if (arg
->count
< arg
->skip
) {
537 if (arg
->fn(tp
, (unsigned long)f
, arg
) < 0) {
548 static int route4_dump(struct tcf_proto
*tp
, unsigned long fh
,
549 struct sk_buff
*skb
, struct tcmsg
*t
)
551 struct route4_filter
*f
= (struct route4_filter
*)fh
;
552 unsigned char *b
= skb_tail_pointer(skb
);
559 t
->tcm_handle
= f
->handle
;
561 rta
= (struct rtattr
*)b
;
562 RTA_PUT(skb
, TCA_OPTIONS
, 0, NULL
);
564 if (!(f
->handle
&0x8000)) {
566 RTA_PUT(skb
, TCA_ROUTE4_TO
, sizeof(id
), &id
);
568 if (f
->handle
&0x80000000) {
569 if ((f
->handle
>>16) != 0xFFFF)
570 RTA_PUT(skb
, TCA_ROUTE4_IIF
, sizeof(f
->iif
), &f
->iif
);
573 RTA_PUT(skb
, TCA_ROUTE4_FROM
, sizeof(id
), &id
);
576 RTA_PUT(skb
, TCA_ROUTE4_CLASSID
, 4, &f
->res
.classid
);
578 if (tcf_exts_dump(skb
, &f
->exts
, &route_ext_map
) < 0)
581 rta
->rta_len
= skb_tail_pointer(skb
) - b
;
583 if (tcf_exts_dump_stats(skb
, &f
->exts
, &route_ext_map
) < 0)
593 static struct tcf_proto_ops cls_route4_ops
= {
596 .classify
= route4_classify
,
598 .destroy
= route4_destroy
,
601 .change
= route4_change
,
602 .delete = route4_delete
,
605 .owner
= THIS_MODULE
,
608 static int __init
init_route4(void)
610 return register_tcf_proto_ops(&cls_route4_ops
);
613 static void __exit
exit_route4(void)
615 unregister_tcf_proto_ops(&cls_route4_ops
);
618 module_init(init_route4
)
619 module_exit(exit_route4
)
620 MODULE_LICENSE("GPL");