/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX"
 */
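
/*
 * Illustrative use (not part of this file; exact iproute2 syntax may vary):
 * realms are assigned by the routing table and matched by this classifier,
 * e.g.
 *
 *	ip route add 10.0.0.0/24 via 192.168.1.1 realm 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route to 2 classid 1:2
 */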
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};
struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};
struct route4_filter {
	struct route4_filter __rcu	*next;
	u32				id;
	int				iif;

	struct tcf_result		res;
	struct tcf_exts			exts;
	u32				handle;
	struct route4_bucket		*bkt;
	struct tcf_proto		*tp;
	struct rcu_head			rcu;
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
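
/* Small direct-mapped cache of recent (id, iif) -> filter lookups;
 * ROUTE4_FAILURE (above) is cached on a miss so repeated failures stay cheap.
 */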
static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}
static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}
static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}
static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
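
/* Lookup order: the per-(id, iif) fastmap cache first, then the bucket for
 * the "to" realm, probing the FROM, IIF and wildcard chains in that order,
 * then the wildcard "to" bucket; a final miss is cached as ROUTE4_FAILURE.
 */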
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
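
/* Filter handles pack the route keys: the low 16 bits carry the "to" realm
 * (0x8000 set when none was given), the high 16 bits carry the "from" realm
 * or, with 0x8000 set, an incoming interface; 0xFFFF there means wildcard.
 */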
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
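
/* Map a handle back to the filter it names: the "to" part selects the
 * bucket, the "from"/interface part selects the chain, then walk it.
 */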
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}
static void
route4_delete_filter(struct rcu_head *head)
{
	struct route4_filter *f = container_of(head, struct route4_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}
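
/* Tear down the whole classifier; when !force, refuse if any bucket is still
 * populated. Filters are freed after an RCU grace period via call_rcu().
 */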
static bool route4_destroy(struct tcf_proto *tp, bool force)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return true;

	if (!force) {
		for (h1 = 0; h1 <= 256; h1++) {
			if (rcu_access_pointer(head->table[h1]))
				return false;
		}
	}

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					call_rcu(&f->rcu, route4_delete_filter);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}
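
/* Unlink a single filter from its chain, reset the fastmap (it may still
 * reference the filter) and drop the bucket if it is now empty.
 */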
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = (struct route4_filter *)arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap lookups that might ref filter
			 * notice we unlink'd the filter so we can't get it
			 * back in the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, route4_delete_filter);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					return 0;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);

			return 0;
		}
	}
	return 0;
}
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
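
/* Validate the TO/FROM/IIF attributes, derive the new handle from them,
 * find or allocate the matching bucket and fill in the filter fields.
 */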
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				goto errout;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
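
/* Create or replace a filter. A replacement is built as a new object and
 * linked in handle order; the old filter is unlinked from its chain and
 * freed after an RCU grace period, and the fastmap is reset.
 */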
static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base,
			 u32 handle, struct nlattr **tca,
			 unsigned long *arg, bool ovr)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	fold = (struct route4_filter *)*arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	netif_keep_dst(qdisc_dev(tp->q));
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = (unsigned long)f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, route4_delete_filter);
	}
	return 0;

errout:
	kfree(f);
	return err;
}
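
/* Walk every filter in every bucket under RTNL, honouring the skip/count
 * bookkeeping expected by the tcf_walker callback.
 */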
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
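
/* Dump one filter to user space, decoding TO/FROM/IIF from the handle and
 * filter fields into netlink attributes.
 */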
static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");