/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
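/*
 * The fastmap below is a small direct-mapped cache in front of the full
 * bucket lookup: each slot remembers the last (id, iif) pair hashed to
 * it together with the matching filter, or ROUTE4_FAILURE when the
 * lookup found nothing.
 */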
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};
struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};
struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct rcu_head		rcu;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
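/*
 * All fastmap updates are done under fastmap_lock so that a slot's id,
 * iif and filter fields are always observed as one consistent entry;
 * route4_reset_fastmap() simply wipes every slot.
 */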
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}
static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}
static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
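/*
 * ROUTE4_APPLY_RESULT() is expanded inside route4_classify(): it hands
 * the filter's result back to the caller, runs any attached extensions
 * (actions/policers), and on a cacheable match records the hit in the
 * fastmap via route4_set_fastmap().
 */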
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
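/*
 * Lookup order: the fastmap cache first, then within the "to" bucket
 * the FROM chain, the IIF chain and finally the wildcard chain.  A
 * complete miss is cached as ROUTE4_FAILURE so repeated lookups for the
 * same (id, iif) fail fast.
 */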
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
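/*
 * Filter handles pack the "to" key in the low bits and the "from"/"iif"
 * key in the upper 16 bits; to_hash() and from_hash() translate a
 * handle back into the table slot and bucket chain used above.
 */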
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;

	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
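/* Control-path lookup of a filter by its handle. */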
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}
static void route4_delete_filter(struct rcu_head *head)
{
	struct route4_filter *f = container_of(head, struct route4_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}
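/*
 * Tear down the classifier instance: walk every bucket and chain,
 * unbind each filter and free it via call_rcu(), then release the
 * buckets and the head.  When not forced, refuse if any bucket is still
 * populated.
 */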
static bool route4_destroy(struct tcf_proto *tp, bool force)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return true;

	if (!force) {
		for (h1 = 0; h1 <= 256; h1++) {
			if (rcu_access_pointer(head->table[h1]))
				return false;
		}
	}

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					call_rcu(&f->rcu, route4_delete_filter);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}
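/*
 * Delete one filter: unlink it from its bucket chain, reset the fastmap
 * so no stale pointer survives, free the filter after a grace period,
 * and drop the bucket itself once its last chain is empty.
 */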
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = (struct route4_filter *)arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap lookups that might reference the
			 * filter; note we unlinked it above, so it cannot get
			 * back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, route4_delete_filter);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					return 0;
			}

			/* OK, the bucket has no filters left; drop it */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);

			return 0;
		}
	}
	return 0;
}
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
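/*
 * Validate the netlink attributes, derive the filter handle from the
 * TO/FROM/IIF keys, find or allocate the target bucket, and fill in the
 * filter's match keys, classid binding and extensions.
 */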
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				goto errout;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
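/*
 * Create or replace a filter.  A replacement is built as a fresh filter
 * object and linked into its chain in handle order; only then is the
 * old filter unlinked and released after an RCU grace period, so
 * concurrent classification never sees a half-updated filter.
 */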
static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, unsigned long *arg, bool ovr)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	fold = (struct route4_filter *)*arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	netif_keep_dst(qdisc_dev(tp->q));
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = (unsigned long)f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, route4_delete_filter);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}
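/* Iterate over every installed filter for dump/walk operations. */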
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
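/*
 * Dump one filter to user space: the TO/FROM/IIF keys are reconstructed
 * from the packed handle and id, followed by the classid and the
 * attached extensions.
 */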
static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");