[B43]: Fix sparse warnings.
[linux/fpc-iii.git] / net / sched / cls_route.c
blob0a8409c1d28ae4d3c4cdeeb346625a8ef01beee1
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/skbuff.h>
18 #include <net/dst.h>
19 #include <net/route.h>
20 #include <net/netlink.h>
21 #include <net/act_api.h>
22 #include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    It allows to use direct table lookups, instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */
32 struct route4_fastmap
34 struct route4_filter *filter;
35 u32 id;
36 int iif;
39 struct route4_head
41 struct route4_fastmap fastmap[16];
42 struct route4_bucket *table[256+1];
45 struct route4_bucket
47 /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
48 struct route4_filter *ht[16+16+1];
51 struct route4_filter
53 struct route4_filter *next;
54 u32 id;
55 int iif;
57 struct tcf_result res;
58 struct tcf_exts exts;
59 u32 handle;
60 struct route4_bucket *bkt;
63 #define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
65 static struct tcf_ext_map route_ext_map = {
66 .police = TCA_ROUTE4_POLICE,
67 .action = TCA_ROUTE4_ACT
70 static __inline__ int route4_fastmap_hash(u32 id, int iif)
72 return id&0xF;
75 static inline
76 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
78 qdisc_lock_tree(dev);
79 memset(head->fastmap, 0, sizeof(head->fastmap));
80 qdisc_unlock_tree(dev);
83 static inline void
84 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
85 struct route4_filter *f)
87 int h = route4_fastmap_hash(id, iif);
88 head->fastmap[h].id = id;
89 head->fastmap[h].iif = iif;
90 head->fastmap[h].filter = f;
93 static __inline__ int route4_hash_to(u32 id)
95 return id&0xFF;
98 static __inline__ int route4_hash_from(u32 id)
100 return (id>>16)&0xF;
103 static __inline__ int route4_hash_iif(int iif)
105 return 16 + ((iif>>16)&0xF);
108 static __inline__ int route4_hash_wild(void)
110 return 32;
113 #define ROUTE4_APPLY_RESULT() \
115 *res = f->res; \
116 if (tcf_exts_is_available(&f->exts)) { \
117 int r = tcf_exts_exec(skb, &f->exts, res); \
118 if (r < 0) { \
119 dont_cache = 1; \
120 continue; \
122 return r; \
123 } else if (!dont_cache) \
124 route4_set_fastmap(head, id, iif, f); \
125 return 0; \
128 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
129 struct tcf_result *res)
131 struct route4_head *head = (struct route4_head*)tp->root;
132 struct dst_entry *dst;
133 struct route4_bucket *b;
134 struct route4_filter *f;
135 u32 id, h;
136 int iif, dont_cache = 0;
138 if ((dst = skb->dst) == NULL)
139 goto failure;
141 id = dst->tclassid;
142 if (head == NULL)
143 goto old_method;
145 iif = ((struct rtable*)dst)->fl.iif;
147 h = route4_fastmap_hash(id, iif);
148 if (id == head->fastmap[h].id &&
149 iif == head->fastmap[h].iif &&
150 (f = head->fastmap[h].filter) != NULL) {
151 if (f == ROUTE4_FAILURE)
152 goto failure;
154 *res = f->res;
155 return 0;
158 h = route4_hash_to(id);
160 restart:
161 if ((b = head->table[h]) != NULL) {
162 for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
163 if (f->id == id)
164 ROUTE4_APPLY_RESULT();
166 for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
167 if (f->iif == iif)
168 ROUTE4_APPLY_RESULT();
170 for (f = b->ht[route4_hash_wild()]; f; f = f->next)
171 ROUTE4_APPLY_RESULT();
174 if (h < 256) {
175 h = 256;
176 id &= ~0xFFFF;
177 goto restart;
180 if (!dont_cache)
181 route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
182 failure:
183 return -1;
185 old_method:
186 if (id && (TC_H_MAJ(id) == 0 ||
187 !(TC_H_MAJ(id^tp->q->handle)))) {
188 res->classid = id;
189 res->class = 0;
190 return 0;
192 return -1;
195 static inline u32 to_hash(u32 id)
197 u32 h = id&0xFF;
198 if (id&0x8000)
199 h += 256;
200 return h;
203 static inline u32 from_hash(u32 id)
205 id &= 0xFFFF;
206 if (id == 0xFFFF)
207 return 32;
208 if (!(id & 0x8000)) {
209 if (id > 255)
210 return 256;
211 return id&0xF;
213 return 16 + (id&0xF);
216 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
218 struct route4_head *head = (struct route4_head*)tp->root;
219 struct route4_bucket *b;
220 struct route4_filter *f;
221 unsigned h1, h2;
223 if (!head)
224 return 0;
226 h1 = to_hash(handle);
227 if (h1 > 256)
228 return 0;
230 h2 = from_hash(handle>>16);
231 if (h2 > 32)
232 return 0;
234 if ((b = head->table[h1]) != NULL) {
235 for (f = b->ht[h2]; f; f = f->next)
236 if (f->handle == handle)
237 return (unsigned long)f;
239 return 0;
242 static void route4_put(struct tcf_proto *tp, unsigned long f)
246 static int route4_init(struct tcf_proto *tp)
248 return 0;
251 static inline void
252 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
254 tcf_unbind_filter(tp, &f->res);
255 tcf_exts_destroy(tp, &f->exts);
256 kfree(f);
259 static void route4_destroy(struct tcf_proto *tp)
261 struct route4_head *head = xchg(&tp->root, NULL);
262 int h1, h2;
264 if (head == NULL)
265 return;
267 for (h1=0; h1<=256; h1++) {
268 struct route4_bucket *b;
270 if ((b = head->table[h1]) != NULL) {
271 for (h2=0; h2<=32; h2++) {
272 struct route4_filter *f;
274 while ((f = b->ht[h2]) != NULL) {
275 b->ht[h2] = f->next;
276 route4_delete_filter(tp, f);
279 kfree(b);
282 kfree(head);
285 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
287 struct route4_head *head = (struct route4_head*)tp->root;
288 struct route4_filter **fp, *f = (struct route4_filter*)arg;
289 unsigned h = 0;
290 struct route4_bucket *b;
291 int i;
293 if (!head || !f)
294 return -EINVAL;
296 h = f->handle;
297 b = f->bkt;
299 for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
300 if (*fp == f) {
301 tcf_tree_lock(tp);
302 *fp = f->next;
303 tcf_tree_unlock(tp);
305 route4_reset_fastmap(tp->q->dev, head, f->id);
306 route4_delete_filter(tp, f);
308 /* Strip tree */
310 for (i=0; i<=32; i++)
311 if (b->ht[i])
312 return 0;
314 /* OK, session has no flows */
315 tcf_tree_lock(tp);
316 head->table[to_hash(h)] = NULL;
317 tcf_tree_unlock(tp);
319 kfree(b);
320 return 0;
323 return 0;
326 static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
327 struct route4_filter *f, u32 handle, struct route4_head *head,
328 struct rtattr **tb, struct rtattr *est, int new)
330 int err;
331 u32 id = 0, to = 0, nhandle = 0x8000;
332 struct route4_filter *fp;
333 unsigned int h1;
334 struct route4_bucket *b;
335 struct tcf_exts e;
337 err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
338 if (err < 0)
339 return err;
341 err = -EINVAL;
342 if (tb[TCA_ROUTE4_CLASSID-1])
343 if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
344 goto errout;
346 if (tb[TCA_ROUTE4_TO-1]) {
347 if (new && handle & 0x8000)
348 goto errout;
349 if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
350 goto errout;
351 to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
352 if (to > 0xFF)
353 goto errout;
354 nhandle = to;
357 if (tb[TCA_ROUTE4_FROM-1]) {
358 if (tb[TCA_ROUTE4_IIF-1])
359 goto errout;
360 if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
361 goto errout;
362 id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
363 if (id > 0xFF)
364 goto errout;
365 nhandle |= id << 16;
366 } else if (tb[TCA_ROUTE4_IIF-1]) {
367 if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
368 goto errout;
369 id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
370 if (id > 0x7FFF)
371 goto errout;
372 nhandle |= (id | 0x8000) << 16;
373 } else
374 nhandle |= 0xFFFF << 16;
376 if (handle && new) {
377 nhandle |= handle & 0x7F00;
378 if (nhandle != handle)
379 goto errout;
382 h1 = to_hash(nhandle);
383 if ((b = head->table[h1]) == NULL) {
384 err = -ENOBUFS;
385 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
386 if (b == NULL)
387 goto errout;
389 tcf_tree_lock(tp);
390 head->table[h1] = b;
391 tcf_tree_unlock(tp);
392 } else {
393 unsigned int h2 = from_hash(nhandle >> 16);
394 err = -EEXIST;
395 for (fp = b->ht[h2]; fp; fp = fp->next)
396 if (fp->handle == f->handle)
397 goto errout;
400 tcf_tree_lock(tp);
401 if (tb[TCA_ROUTE4_TO-1])
402 f->id = to;
404 if (tb[TCA_ROUTE4_FROM-1])
405 f->id = to | id<<16;
406 else if (tb[TCA_ROUTE4_IIF-1])
407 f->iif = id;
409 f->handle = nhandle;
410 f->bkt = b;
411 tcf_tree_unlock(tp);
413 if (tb[TCA_ROUTE4_CLASSID-1]) {
414 f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
415 tcf_bind_filter(tp, &f->res, base);
418 tcf_exts_change(tp, &f->exts, &e);
420 return 0;
421 errout:
422 tcf_exts_destroy(tp, &e);
423 return err;
426 static int route4_change(struct tcf_proto *tp, unsigned long base,
427 u32 handle,
428 struct rtattr **tca,
429 unsigned long *arg)
431 struct route4_head *head = tp->root;
432 struct route4_filter *f, *f1, **fp;
433 struct route4_bucket *b;
434 struct rtattr *opt = tca[TCA_OPTIONS-1];
435 struct rtattr *tb[TCA_ROUTE4_MAX];
436 unsigned int h, th;
437 u32 old_handle = 0;
438 int err;
440 if (opt == NULL)
441 return handle ? -EINVAL : 0;
443 if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
444 return -EINVAL;
446 if ((f = (struct route4_filter*)*arg) != NULL) {
447 if (f->handle != handle && handle)
448 return -EINVAL;
450 if (f->bkt)
451 old_handle = f->handle;
453 err = route4_set_parms(tp, base, f, handle, head, tb,
454 tca[TCA_RATE-1], 0);
455 if (err < 0)
456 return err;
458 goto reinsert;
461 err = -ENOBUFS;
462 if (head == NULL) {
463 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
464 if (head == NULL)
465 goto errout;
467 tcf_tree_lock(tp);
468 tp->root = head;
469 tcf_tree_unlock(tp);
472 f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
473 if (f == NULL)
474 goto errout;
476 err = route4_set_parms(tp, base, f, handle, head, tb,
477 tca[TCA_RATE-1], 1);
478 if (err < 0)
479 goto errout;
481 reinsert:
482 h = from_hash(f->handle >> 16);
483 for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
484 if (f->handle < f1->handle)
485 break;
487 f->next = f1;
488 tcf_tree_lock(tp);
489 *fp = f;
491 if (old_handle && f->handle != old_handle) {
492 th = to_hash(old_handle);
493 h = from_hash(old_handle >> 16);
494 if ((b = head->table[th]) != NULL) {
495 for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
496 if (*fp == f) {
497 *fp = f->next;
498 break;
503 tcf_tree_unlock(tp);
505 route4_reset_fastmap(tp->q->dev, head, f->id);
506 *arg = (unsigned long)f;
507 return 0;
509 errout:
510 kfree(f);
511 return err;
514 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
516 struct route4_head *head = tp->root;
517 unsigned h, h1;
519 if (head == NULL)
520 arg->stop = 1;
522 if (arg->stop)
523 return;
525 for (h = 0; h <= 256; h++) {
526 struct route4_bucket *b = head->table[h];
528 if (b) {
529 for (h1 = 0; h1 <= 32; h1++) {
530 struct route4_filter *f;
532 for (f = b->ht[h1]; f; f = f->next) {
533 if (arg->count < arg->skip) {
534 arg->count++;
535 continue;
537 if (arg->fn(tp, (unsigned long)f, arg) < 0) {
538 arg->stop = 1;
539 return;
541 arg->count++;
548 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
549 struct sk_buff *skb, struct tcmsg *t)
551 struct route4_filter *f = (struct route4_filter*)fh;
552 unsigned char *b = skb_tail_pointer(skb);
553 struct rtattr *rta;
554 u32 id;
556 if (f == NULL)
557 return skb->len;
559 t->tcm_handle = f->handle;
561 rta = (struct rtattr*)b;
562 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
564 if (!(f->handle&0x8000)) {
565 id = f->id&0xFF;
566 RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
568 if (f->handle&0x80000000) {
569 if ((f->handle>>16) != 0xFFFF)
570 RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
571 } else {
572 id = f->id>>16;
573 RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
575 if (f->res.classid)
576 RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
578 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
579 goto rtattr_failure;
581 rta->rta_len = skb_tail_pointer(skb) - b;
583 if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
584 goto rtattr_failure;
586 return skb->len;
588 rtattr_failure:
589 nlmsg_trim(skb, b);
590 return -1;
593 static struct tcf_proto_ops cls_route4_ops = {
594 .next = NULL,
595 .kind = "route",
596 .classify = route4_classify,
597 .init = route4_init,
598 .destroy = route4_destroy,
599 .get = route4_get,
600 .put = route4_put,
601 .change = route4_change,
602 .delete = route4_delete,
603 .walk = route4_walk,
604 .dump = route4_dump,
605 .owner = THIS_MODULE,
608 static int __init init_route4(void)
610 return register_tcf_proto_ops(&cls_route4_ops);
613 static void __exit exit_route4(void)
615 unregister_tcf_proto_ops(&cls_route4_ops);
618 module_init(init_route4)
619 module_exit(exit_route4)
620 MODULE_LICENSE("GPL");