/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

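/* The classifier derives its key from skb->tc_index as
 *
 *	key = (skb->tc_index & mask) >> shift;
 *
 * Illustrative example (values chosen arbitrarily): with mask 0x00f0 and
 * shift 4, a tc_index of 0x0123 yields key = (0x0123 & 0x00f0) >> 4 = 2.
 */
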
struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct rcu_head		rcu;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_head rcu;
};

struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_head rcu;
};

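/* Exactly one of the two lookup structures is in use at a time: if the
 * largest possible key (mask >> shift) is small, "perfect" is an array
 * indexed directly by key; otherwise filters are chained in the
 * "imperfect" table h, bucketed by key % hash.
 */
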
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}

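/* tcindex_lookup() uses rcu_dereference_bh_rtnl() so that the same walk
 * can run either from the softirq classification path (RCU-bh read side)
 * or from the RTNL-protected configuration path.
 */
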
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		/* no explicit match: optionally fall through to a classid
		 * synthesized from the qdisc handle and the key
		 */
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}

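/* Illustrative (not part of this file) iproute2 usage; tcindex is
 * typically paired with a dsmark qdisc that copies the DS field into
 * skb->tc_index:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 0x2e tcindex mask 0xff shift 0 classid 1:1 pass_on
 */
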
static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return 0;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

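/* A filter result is torn down in one of two ways: results that live in
 * the perfect-hash array only need their extensions destroyed
 * (tcindex_destroy_rexts), while results embedded in a hash-chain entry
 * are freed together with their enclosing tcindex_filter
 * (tcindex_destroy_fexts). Both run after an RCU grace period.
 */
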
static void tcindex_destroy_rexts(struct rcu_head *head)
{
	struct tcindex_filter_result *r;

	r = container_of(head, struct tcindex_filter_result, rcu);
	tcf_exts_destroy(&r->exts);
}

static void tcindex_destroy_fexts(struct rcu_head *head)
{
	struct tcindex_filter *f = container_of(head, struct tcindex_filter,
						rcu);

	tcf_exts_destroy(&f->result.exts);
	kfree(f);
}

static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f)
		call_rcu(&f->rcu, tcindex_destroy_fexts);
	else
		call_rcu(&r->rcu, tcindex_destroy_rexts);
	return 0;
}

static int tcindex_destroy_element(struct tcf_proto *tp,
				   unsigned long arg,
				   struct tcf_walker *walker)
{
	return tcindex_delete(tp, arg);
}

static void __tcindex_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

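/* These attributes correspond to the tc(8) tcindex options: hash, mask,
 * shift, fall_through/pass_on and classid.
 */
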
static int tcindex_filter_result_init(struct tcindex_filter_result *r)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

static void __tcindex_partial_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result);
	if (err < 0)
		goto errout1;
	err = tcindex_filter_result_init(&cr);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr.res = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}
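
	/* Example: with mask 0xf and shift 0 the largest key is 15, below
	 * PERFECT_HASH_THRESHOLD, so a 16-entry perfect hash is selected
	 * above.
	 */
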
	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;

	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);
			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	if (old_r)
		tcf_exts_change(tp, &r->exts, &e);
	else
		tcf_exts_change(tp, &cr.exts, &e);

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr.res;
	/* publish the new tcindex_data; readers see either the old or the
	 * new configuration, never a half-updated one
	 */
	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		tcf_exts_change(tp, &f->result.exts, &r->exts);

		/* walk to the end of the bucket's chain and link in the
		 * new filter
		 */
		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	}

	if (oldp)
		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&cr.exts);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, unsigned long *arg, bool ovr)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg 0x%lx\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp,
				    (unsigned long) (p->perfect+i), walker)
				     < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, (unsigned long) &f->result,
					       walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static bool tcindex_destroy(struct tcf_proto *tp, bool force)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcf_walker walker;

	if (!force)
		return false;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = tcindex_destroy_element;
	tcindex_walk(tp, &walker);

	RCU_INIT_POINTER(tp->root, NULL);
	call_rcu(&p->rcu, __tcindex_destroy);
	return true;
}

static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");