/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
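/*
 * A filter handle encodes its match directly: the low 16 bits carry the
 * "to" route tag (bit 15 set when "to" is wildcarded), and the high 16
 * bits carry the "from" tag, the interface id with bit 15 set, or
 * 0xFFFF for "from ANY".
 */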
struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head {
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256 + 1];
};
struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16 + 16 + 1];
};
struct route4_filter {
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
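/*
 * The fastmap is a 16-entry, direct-mapped cache of recent (id, iif)
 * lookups; ROUTE4_FAILURE is cached when a lookup matched no filter.
 */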
static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}
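/*
 * Flush the fastmap. Taking the qdisc root lock keeps the fast path in
 * route4_classify() from seeing a half-cleared cache.
 */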
static void
route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

	spin_lock_bh(root_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(root_lock);
}
static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
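/*
 * Bucket layout: "to" tags index head->table[] (entry 256 is the
 * "to ANY" bucket); within a bucket, "from" tags hash into ht[0..15],
 * interfaces into ht[16..31], and ht[32] holds the wildcard chain.
 */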
static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}
static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}
static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}
static inline int route4_hash_wild(void)
{
	return 32;
}
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
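/*
 * Lookup order: the bucket for the exact "to" tag is searched first
 * (FROM chain, then IIF chain, then wildcard), and only afterwards the
 * "to ANY" bucket at table[256].
 */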
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = inet_iif(skb);

	/* Fastmap hit? */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	b = head->table[h];
	if (b) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}
static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;

	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	b = head->table[h1];
	if (b) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = head->table[h1];
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter **fp, *f = (struct route4_filter *)arg;
	unsigned int h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */

			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
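/*
 * Validate the attributes, derive the filter's handle and bucket from
 * them, and install the new parameters on @f; used for both newly
 * created filters and in-place changes.
 */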
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	b = head->table[h1];
	if (!b) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base,
			 u32 handle, struct nlattr **tca,
			 unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	f = (struct route4_filter *)*arg;
	if (f) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(net, tp, base, f, handle, head, tb,
				       tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Chains are kept sorted by handle. */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		b = head->table[th];
		if (b) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
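/*
 * Dumping reverses the handle encoding: TO is emitted unless wildcarded,
 * then either IIF (bit 31 set) or FROM from the high half of the id.
 */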
static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}
static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");