/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */

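/*
 * Layout: tp->root points at a route4_head.  head->table[] has one
 * bucket per "to" realm (0..255) plus a wildcard bucket at index 256.
 * Each bucket holds 33 filter chains: 16 hashed by "from" realm,
 * 16 hashed by incoming interface, and one wildcard chain.
 */
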
struct route4_fastmap {
        struct route4_filter *filter;
        u32 id;
        int iif;
};

struct route4_head {
        struct route4_fastmap fastmap[16];
        struct route4_bucket __rcu *table[256 + 1];
        struct rcu_head rcu;
};

struct route4_bucket {
        /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
        struct route4_filter __rcu *ht[16 + 16 + 1];
        struct rcu_head rcu;
};

struct route4_filter {
        struct route4_filter __rcu *next;
        u32 id;
        int iif;

        struct tcf_result res;
        struct tcf_exts exts;
        u32 handle;
        struct route4_bucket *bkt;
        struct tcf_proto *tp;
        struct rcu_head rcu;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

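/*
 * The fastmap is a 16-entry, direct-mapped cache of recent lookups,
 * keyed by (id, iif).  ROUTE4_FAILURE is stored in it to cache
 * negative results, so repeated misses also short-circuit.
 */
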
static inline int route4_fastmap_hash(u32 id, int iif)
{
        return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);

static void
route4_reset_fastmap(struct route4_head *head)
{
        spin_lock_bh(&fastmap_lock);
        memset(head->fastmap, 0, sizeof(head->fastmap));
        spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
                   struct route4_filter *f)
{
        int h = route4_fastmap_hash(id, iif);

        /* fastmap updates must look atomic so id, iif and filter
         * stay consistent with each other
         */
        spin_lock_bh(&fastmap_lock);
        head->fastmap[h].id = id;
        head->fastmap[h].iif = iif;
        head->fastmap[h].filter = f;
        spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
        return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
        return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
        return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
        return 32;
}

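/*
 * ROUTE4_APPLY_RESULT() expands inside the lookup loops of
 * route4_classify(): on an action failure it marks the result as
 * uncacheable and continues with the next filter; otherwise it
 * caches the hit in the fastmap and returns.
 */
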
#define ROUTE4_APPLY_RESULT()                                   \
{                                                               \
        *res = f->res;                                          \
        if (tcf_exts_is_available(&f->exts)) {                  \
                int r = tcf_exts_exec(skb, &f->exts, res);      \
                if (r < 0) {                                    \
                        dont_cache = 1;                         \
                        continue;                               \
                }                                               \
                return r;                                       \
        } else if (!dont_cache)                                 \
                route4_set_fastmap(head, id, iif, f);           \
        return 0;                                               \
}

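/*
 * Lookup order: fastmap first; then, within the bucket for the "to"
 * realm, the FROM chain, the IIF chain and the wildcard chain; and
 * finally the same three chains of the wildcard "to" bucket (256).
 */
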
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                           struct tcf_result *res)
{
        struct route4_head *head = rcu_dereference_bh(tp->root);
        struct dst_entry *dst;
        struct route4_bucket *b;
        struct route4_filter *f;
        u32 id, h;
        int iif, dont_cache = 0;

        dst = skb_dst(skb);
        if (!dst)
                goto failure;

        id = dst->tclassid;
        if (head == NULL)
                goto old_method;

        iif = inet_iif(skb);

        h = route4_fastmap_hash(id, iif);

        spin_lock(&fastmap_lock);
        if (id == head->fastmap[h].id &&
            iif == head->fastmap[h].iif &&
            (f = head->fastmap[h].filter) != NULL) {
                if (f == ROUTE4_FAILURE) {
                        spin_unlock(&fastmap_lock);
                        goto failure;
                }

                *res = f->res;
                spin_unlock(&fastmap_lock);
                return 0;
        }
        spin_unlock(&fastmap_lock);

        h = route4_hash_to(id);

restart:
        b = rcu_dereference_bh(head->table[h]);
        if (b) {
                for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        if (f->id == id)
                                ROUTE4_APPLY_RESULT();

                for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        if (f->iif == iif)
                                ROUTE4_APPLY_RESULT();

                for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
                     f;
                     f = rcu_dereference_bh(f->next))
                        ROUTE4_APPLY_RESULT();
        }
        if (h < 256) {
                h = 256;
                id &= ~0xFFFF;
                goto restart;
        }

        if (!dont_cache)
                route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
        return -1;

old_method:
        if (id && (TC_H_MAJ(id) == 0 ||
                   !(TC_H_MAJ(id ^ tp->q->handle)))) {
                res->classid = id;
                res->class = 0;
                return 0;
        }
        return -1;
}

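/*
 * A filter handle packs the match into 32 bits: bits 0-7 carry the
 * "to" realm (bit 15 set means "to" is wildcarded, bits 8-14 are free
 * for distinguishing filters with the same match); the upper 16 bits
 * carry the "from" realm, or the iif with bit 31 set, or 0xFFFF for a
 * full wildcard.  to_hash()/from_hash() map a handle back to the
 * bucket and chain indexes used above.
 */
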
static inline u32 to_hash(u32 id)
{
        u32 h = id & 0xFF;

        if (id & 0x8000)
                h += 256;
        return h;
}

static inline u32 from_hash(u32 id)
{
        id &= 0xFFFF;
        if (id == 0xFFFF)
                return 32;
        if (!(id & 0x8000)) {
                if (id > 255)
                        return 256;
                return id & 0xF;
        }
        return 16 + (id & 0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_bucket *b;
        struct route4_filter *f;
        unsigned int h1, h2;

        if (!head)
                return 0;

        h1 = to_hash(handle);
        if (h1 > 256)
                return 0;

        h2 = from_hash(handle >> 16);
        if (h2 > 32)
                return 0;

        b = rtnl_dereference(head->table[h1]);
        if (b) {
                for (f = rtnl_dereference(b->ht[h2]);
                     f;
                     f = rtnl_dereference(f->next))
                        if (f->handle == handle)
                                return (unsigned long)f;
        }
        return 0;
}

static int route4_init(struct tcf_proto *tp)
{
        struct route4_head *head;

        head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        rcu_assign_pointer(tp->root, head);
        return 0;
}

static void route4_delete_filter(struct rcu_head *head)
{
        struct route4_filter *f = container_of(head, struct route4_filter, rcu);

        tcf_exts_destroy(&f->exts);
        kfree(f);
}

static bool route4_destroy(struct tcf_proto *tp, bool force)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        int h1, h2;

        if (head == NULL)
                return true;

        if (!force) {
                for (h1 = 0; h1 <= 256; h1++) {
                        if (rcu_access_pointer(head->table[h1]))
                                return false;
                }
        }

        for (h1 = 0; h1 <= 256; h1++) {
                struct route4_bucket *b;

                b = rtnl_dereference(head->table[h1]);
                if (b) {
                        for (h2 = 0; h2 <= 32; h2++) {
                                struct route4_filter *f;

                                while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
                                        struct route4_filter *next;

                                        next = rtnl_dereference(f->next);

                                        RCU_INIT_POINTER(b->ht[h2], next);
                                        tcf_unbind_filter(tp, &f->res);
                                        call_rcu(&f->rcu, route4_delete_filter);
                                }
                        }
                        RCU_INIT_POINTER(head->table[h1], NULL);
                        kfree_rcu(b, rcu);
                }
        }
        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
        return true;
}

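/*
 * route4_delete() unlinks a single filter; when its bucket ends up
 * with no chains left, the bucket itself is freed as well.
 */
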
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_filter *f = (struct route4_filter *)arg;
        struct route4_filter __rcu **fp;
        struct route4_filter *nf;
        struct route4_bucket *b;
        unsigned int h = 0;
        int i;

        if (!head || !f)
                return -EINVAL;

        h = f->handle;
        b = f->bkt;

        fp = &b->ht[from_hash(h >> 16)];
        for (nf = rtnl_dereference(*fp); nf;
             fp = &nf->next, nf = rtnl_dereference(*fp)) {
                if (nf == f) {
                        /* unlink it */
                        RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

                        /* Remove any fastmap lookups that might ref filter
                         * notice we unlink'd the filter so we can't get it
                         * back in the fastmap.
                         */
                        route4_reset_fastmap(head);

                        /* Delete it */
                        tcf_unbind_filter(tp, &f->res);
                        call_rcu(&f->rcu, route4_delete_filter);

                        /* Strip RTNL protected tree */
                        for (i = 0; i <= 32; i++) {
                                struct route4_filter *rt;

                                rt = rtnl_dereference(b->ht[i]);
                                if (rt)
                                        return 0;
                        }

                        /* OK, session has no flows */
                        RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
                        kfree_rcu(b, rcu);

                        return 0;
                }
        }
        return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
        [TCA_ROUTE4_CLASSID] = { .type = NLA_U32 },
        [TCA_ROUTE4_TO]      = { .type = NLA_U32 },
        [TCA_ROUTE4_FROM]    = { .type = NLA_U32 },
        [TCA_ROUTE4_IIF]     = { .type = NLA_U32 },
};

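/*
 * route4_set_parms() validates the netlink attributes, builds the
 * packed handle described above, allocates the target bucket on
 * demand and rejects a duplicate handle with -EEXIST.
 */
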
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
                            unsigned long base, struct route4_filter *f,
                            u32 handle, struct route4_head *head,
                            struct nlattr **tb, struct nlattr *est, int new,
                            bool ovr)
{
        u32 id = 0, to = 0, nhandle = 0x8000;
        struct route4_filter *fp;
        unsigned int h1;
        struct route4_bucket *b;
        struct tcf_exts e;
        int err;

        err = tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
        if (err < 0)
                return err;
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                goto errout;

        err = -EINVAL;
        if (tb[TCA_ROUTE4_TO]) {
                if (new && handle & 0x8000)
                        goto errout;
                to = nla_get_u32(tb[TCA_ROUTE4_TO]);
                if (to > 0xFF)
                        goto errout;
                nhandle = to;
        }

        if (tb[TCA_ROUTE4_FROM]) {
                if (tb[TCA_ROUTE4_IIF])
                        goto errout;
                id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
                if (id > 0xFF)
                        goto errout;
                nhandle |= id << 16;
        } else if (tb[TCA_ROUTE4_IIF]) {
                id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
                if (id > 0x7FFF)
                        goto errout;
                nhandle |= (id | 0x8000) << 16;
        } else
                nhandle |= 0xFFFF << 16;

        if (handle && new) {
                nhandle |= handle & 0x7F00;
                if (nhandle != handle)
                        goto errout;
        }

        h1 = to_hash(nhandle);
        b = rtnl_dereference(head->table[h1]);
        if (!b) {
                err = -ENOBUFS;
                b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
                if (b == NULL)
                        goto errout;

                rcu_assign_pointer(head->table[h1], b);
        } else {
                unsigned int h2 = from_hash(nhandle >> 16);

                err = -EEXIST;
                for (fp = rtnl_dereference(b->ht[h2]);
                     fp;
                     fp = rtnl_dereference(fp->next))
                        if (fp->handle == f->handle)
                                goto errout;
        }

        if (tb[TCA_ROUTE4_TO])
                f->id = to;

        if (tb[TCA_ROUTE4_FROM])
                f->id = to | id << 16;
        else if (tb[TCA_ROUTE4_IIF])
                f->iif = id;

        f->handle = nhandle;
        f->bkt = b;
        f->tp = tp;

        if (tb[TCA_ROUTE4_CLASSID]) {
                f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
                tcf_bind_filter(tp, &f->res, base);
        }

        tcf_exts_change(tp, &f->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(&e);
        return err;
}

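/*
 * Updates are copy-on-write: route4_change() allocates a new filter,
 * copies the old one's state, links the replacement into its chain in
 * handle order and only then unlinks and RCU-frees the original, so
 * concurrent classifiers always see a consistent filter.
 */
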
static int route4_change(struct net *net, struct sk_buff *in_skb,
                         struct tcf_proto *tp, unsigned long base, u32 handle,
                         struct nlattr **tca, unsigned long *arg, bool ovr)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_filter __rcu **fp;
        struct route4_filter *fold, *f1, *pfp, *f = NULL;
        struct route4_bucket *b;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_ROUTE4_MAX + 1];
        unsigned int h, th;
        int err;
        bool new = true;

        if (opt == NULL)
                return handle ? -EINVAL : 0;

        err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
        if (err < 0)
                return err;

        fold = (struct route4_filter *)*arg;
        if (fold && handle && fold->handle != handle)
                return -EINVAL;

        err = -ENOBUFS;
        f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
        if (!f)
                goto errout;

        err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
        if (err < 0)
                goto errout;

        if (fold) {
                f->id = fold->id;
                f->iif = fold->iif;
                f->res = fold->res;
                f->handle = fold->handle;

                f->tp = fold->tp;
                f->bkt = fold->bkt;
                new = false;
        }

        err = route4_set_parms(net, tp, base, f, handle, head, tb,
                               tca[TCA_RATE], new, ovr);
        if (err < 0)
                goto errout;

        h = from_hash(f->handle >> 16);
        fp = &f->bkt->ht[h];
        for (pfp = rtnl_dereference(*fp);
             (f1 = rtnl_dereference(*fp)) != NULL;
             fp = &f1->next)
                if (f->handle < f1->handle)
                        break;

        netif_keep_dst(qdisc_dev(tp->q));
        rcu_assign_pointer(f->next, f1);
        rcu_assign_pointer(*fp, f);

        if (fold && fold->handle && f->handle != fold->handle) {
                th = to_hash(fold->handle);
                h = from_hash(fold->handle >> 16);
                b = rtnl_dereference(head->table[th]);
                if (b) {
                        fp = &b->ht[h];
                        for (pfp = rtnl_dereference(*fp); pfp;
                             fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
                                if (pfp == fold) {
                                        rcu_assign_pointer(*fp, fold->next);
                                        break;
                                }
                        }
                }
        }

        route4_reset_fastmap(head);
        *arg = (unsigned long)f;
        if (fold) {
                tcf_unbind_filter(tp, &fold->res);
                call_rcu(&fold->rcu, route4_delete_filter);
        }
        return 0;

errout:
        if (f)
                tcf_exts_destroy(&f->exts);
        kfree(f);
        return err;
}

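/*
 * route4_walk() visits every filter under RTNL, honouring the
 * skip/count cursor in the tcf_walker so dumps can be resumed.
 */
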
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct route4_head *head = rtnl_dereference(tp->root);
        unsigned int h, h1;

        if (head == NULL)
                arg->stop = 1;

        if (arg->stop)
                return;

        for (h = 0; h <= 256; h++) {
                struct route4_bucket *b = rtnl_dereference(head->table[h]);

                if (b) {
                        for (h1 = 0; h1 <= 32; h1++) {
                                struct route4_filter *f;

                                for (f = rtnl_dereference(b->ht[h1]);
                                     f;
                                     f = rtnl_dereference(f->next)) {
                                        if (arg->count < arg->skip) {
                                                arg->count++;
                                                continue;
                                        }
                                        if (arg->fn(tp, (unsigned long)f, arg) < 0) {
                                                arg->stop = 1;
                                                return;
                                        }
                                        arg->count++;
                                }
                        }
                }
        }
}

static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                       struct sk_buff *skb, struct tcmsg *t)
{
        struct route4_filter *f = (struct route4_filter *)fh;
        struct nlattr *nest;
        u32 id;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (!(f->handle & 0x8000)) {
                id = f->id & 0xFF;
                if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
                        goto nla_put_failure;
        }
        if (f->handle & 0x80000000) {
                if ((f->handle >> 16) != 0xFFFF &&
                    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
                        goto nla_put_failure;
        } else {
                id = f->id >> 16;
                if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
                        goto nla_put_failure;
        }
        if (f->res.classid &&
            nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &f->exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

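/*
 * Example usage (a sketch; realm numbers, device names and class ids
 * below are illustrative, not part of this file):
 *
 *   ip route add 192.168.1.0/24 dev eth0 realm 2
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *      route to 2 classid 1:10
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *      route from 2 classid 1:20
 */
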
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
        .kind     = "route",
        .classify = route4_classify,
        .init     = route4_init,
        .destroy  = route4_destroy,
        .get      = route4_get,
        .change   = route4_change,
        .delete   = route4_delete,
        .walk     = route4_walk,
        .dump     = route4_dump,
        .owner    = THIS_MODULE,
};

static int __init init_route4(void)
{
        return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
        unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");