Save sram context after changing MPU, DSP or core clocks
[linux-ginger.git] / net / sched / cls_route.c
blobdd872d5383efe371fa4333dcc1af724039b047b5
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/skbuff.h>
18 #include <net/dst.h>
19 #include <net/route.h>
20 #include <net/netlink.h>
21 #include <net/act_api.h>
22 #include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    It allows to use direct table lookups, instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */
32 struct route4_fastmap
34 struct route4_filter *filter;
35 u32 id;
36 int iif;
39 struct route4_head
41 struct route4_fastmap fastmap[16];
42 struct route4_bucket *table[256+1];
/* One "to" bucket, holding per-origin filter chains. */
struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};
51 struct route4_filter
53 struct route4_filter *next;
54 u32 id;
55 int iif;
57 struct tcf_result res;
58 struct tcf_exts exts;
59 u32 handle;
60 struct route4_bucket *bkt;
/* Sentinel stored in the fastmap to cache a negative lookup result. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
65 static const struct tcf_ext_map route_ext_map = {
66 .police = TCA_ROUTE4_POLICE,
67 .action = TCA_ROUTE4_ACT
70 static __inline__ int route4_fastmap_hash(u32 id, int iif)
72 return id&0xF;
75 static inline
76 void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
78 spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
80 spin_lock_bh(root_lock);
81 memset(head->fastmap, 0, sizeof(head->fastmap));
82 spin_unlock_bh(root_lock);
85 static inline void
86 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
87 struct route4_filter *f)
89 int h = route4_fastmap_hash(id, iif);
90 head->fastmap[h].id = id;
91 head->fastmap[h].iif = iif;
92 head->fastmap[h].filter = f;
95 static __inline__ int route4_hash_to(u32 id)
97 return id&0xFF;
100 static __inline__ int route4_hash_from(u32 id)
102 return (id>>16)&0xF;
/*
 * Row index (16..31) of the IIF chain inside a bucket.
 *
 * Consistency fix: `inline` instead of GCC-specific `__inline__`,
 * matching the other inline helpers in this file.
 */
static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}
/*
 * Row index (32) of the single wildcard chain inside a bucket.
 *
 * Consistency fix: `inline` instead of GCC-specific `__inline__`,
 * matching the other inline helpers in this file.
 */
static inline int route4_hash_wild(void)
{
	return 32;
}
/*
 * Expanded inside the lookup loops of route4_classify(): apply filter
 * f's result and run its extensions.  A negative verdict from the
 * actions sets dont_cache and moves on to the next filter (the
 * `continue` targets the caller's loop); any other verdict returns
 * from route4_classify(), caching the hit in the fastmap first unless
 * caching was vetoed earlier in this lookup.
 */
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
/*
 * Classify @skb by the routing decision attached to it.
 *
 * Lookup order: (1) the fastmap cache, which may hold a cached miss
 * (ROUTE4_FAILURE); (2) the bucket selected by the "to" tag — its
 * FROM row, its IIF row, then its wildcard row; (3) one retry against
 * the wildcard "to" bucket at table[256].  A final miss is cached in
 * the fastmap unless an action run via ROUTE4_APPLY_RESULT() set
 * dont_cache.
 *
 * Returns the action verdict (>= 0) on a match, -1 on no match.  When
 * no filter table exists yet, falls back to interpreting the route
 * tclassid directly as a class id ("old method").
 */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb_dst(skb)) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	/* Fast path: cached (id, iif) result, possibly a cached miss. */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		/* Exact "from" tag match first ... */
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		/* ... then incoming-interface match ... */
		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		/* ... then "from ANY". */
		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}

	/* Retry once against the wildcard "to" bucket. */
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	/* No table yet: accept the route tag as a classid when its
	 * major part is zero or matches this qdisc's handle. */
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
197 static inline u32 to_hash(u32 id)
199 u32 h = id&0xFF;
200 if (id&0x8000)
201 h += 256;
202 return h;
205 static inline u32 from_hash(u32 id)
207 id &= 0xFFFF;
208 if (id == 0xFFFF)
209 return 32;
210 if (!(id & 0x8000)) {
211 if (id > 255)
212 return 256;
213 return id&0xF;
215 return 16 + (id&0xF);
/*
 * Look up a filter by @handle (the "get" classifier op).
 * Returns the filter pointer cast to unsigned long, or 0 when the
 * head, bucket or filter does not exist or the handle decodes to an
 * out-of-range index.
 */
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
/* Counterpart of route4_get(); intentionally a no-op here. */
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
/* Nothing to set up here: the head is allocated lazily in route4_change(). */
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
/* Unbind the class, destroy the extensions and free one filter. */
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
/*
 * Tear down the whole classifier instance: free every filter in every
 * row of every bucket, then each bucket, then the head itself.
 */
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				/* Pop filters off the row until it is empty. */
				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
/*
 * Remove one filter (@arg) from its bucket row, invalidate the
 * fastmap and free it.  If the bucket ends up empty in every row, it
 * is unlinked from the head table and freed as well.
 * Returns 0, or -EINVAL when there is no head or no filter.
 */
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			/* Unlink under the tree lock before freeing. */
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
328 static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
329 [TCA_ROUTE4_CLASSID] = { .type = NLA_U32 },
330 [TCA_ROUTE4_TO] = { .type = NLA_U32 },
331 [TCA_ROUTE4_FROM] = { .type = NLA_U32 },
332 [TCA_ROUTE4_IIF] = { .type = NLA_U32 },
/*
 * Validate the TCA_ROUTE4_* attributes and apply them to filter @f.
 *
 * The handle built here (nhandle) encodes the match: bits 0..7 carry
 * the "to" tag, bit 15 is set when no "to" tag was given, and the
 * high 16 bits carry either the "from" tag, the iif value with its
 * top bit set, or 0xFFFF for "from ANY".  The destination bucket is
 * allocated on demand, and the filter fields are committed under the
 * tree lock.  Returns 0 or a negative errno; on error the validated
 * extensions are destroyed.
 */
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		/* '&' binds tighter than '&&': a new filter that supplies
		 * a TO tag must not request a handle with the "no TO"
		 * flag (bit 15) set. */
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		/* FROM and IIF are mutually exclusive (see file header). */
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		/* A user-supplied handle must agree with the encoding
		 * derived from the attributes. */
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		/* Bucket exists: reject a duplicate handle in its row. */
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	/* Commit the new parameters under the tree lock. */
	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
/*
 * Create a new filter or update an existing one (the "change" op).
 *
 * For an existing filter (*arg != 0) the parameters are re-applied
 * via route4_set_parms() and the filter is (re)inserted; if its
 * handle changed, the stale entry filed under the old handle is
 * unlinked.  For a new filter the head is allocated on first use.
 * Bucket rows are kept sorted by ascending handle.  The fastmap is
 * reset afterwards.  Returns 0 with the filter stored in *arg, or a
 * negative errno.
 */
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		/* First filter on this tp: allocate the head lazily. */
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Insert sorted by ascending handle within the bucket row. */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		/* Handle changed: unlink the entry under the old handle. */
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
/*
 * Iterate over every filter in every bucket row, invoking arg->fn on
 * each, honouring arg->skip/arg->count and stopping early (arg->stop)
 * when fn returns a negative value.
 */
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					/* Skip the first arg->skip entries. */
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
/*
 * Dump one filter (@fh) as netlink attributes: TO unless the "no TO"
 * flag (bit 15 of the handle) is set, then IIF or FROM depending on
 * bit 31 of the handle, the classid if set, and the extensions.
 * The NLA_PUT_U32 macros jump to nla_put_failure on overflow.
 * Returns skb->len on success, -1 on failure (skb trimmed back).
 */
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
	}
	if (f->handle&0x80000000) {
		/* IIF encoding; 0xFFFF in the high half means "from ANY". */
		if ((f->handle>>16) != 0xFFFF)
			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
	} else {
		id = f->id>>16;
		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
	}
	if (f->res.classid)
		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
594 static struct tcf_proto_ops cls_route4_ops __read_mostly = {
595 .kind = "route",
596 .classify = route4_classify,
597 .init = route4_init,
598 .destroy = route4_destroy,
599 .get = route4_get,
600 .put = route4_put,
601 .change = route4_change,
602 .delete = route4_delete,
603 .walk = route4_walk,
604 .dump = route4_dump,
605 .owner = THIS_MODULE,
608 static int __init init_route4(void)
610 return register_tcf_proto_ops(&cls_route4_ops);
613 static void __exit exit_route4(void)
615 unregister_tcf_proto_ops(&cls_route4_ops);
/* Module registration boilerplate. */
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");