// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
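
/* Illustrative example: a diffserv-style setup that masks out the six DSCP
 * bits can produce keys 0..63 at most.  That upper bound is not bigger than
 * PERFECT_HASH_THRESHOLD, so the classifier uses a perfect hash with one
 * result slot per key rather than the chained DEFAULT_HASH_SIZE table.
 */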

struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct tcindex_data	*p;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};

struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};
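
/* Illustrative example of the key derivation used throughout this file:
 * with mask 0xfc and shift 2, an skb carrying tc_index 0x2c is looked up
 * under key (0x2c & 0xfc) >> 2 == 0x0b.
 */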

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}
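
/* Illustrative example of the fall_through path above: if no filter matches
 * key 5 and the classifier hangs off qdisc 8001:, the result becomes
 * TC_H_MAKE(0x80010000, 5), i.e. classid 8001:5, so the key is used directly
 * as the minor class number.
 */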

static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);

	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);

		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}
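
/* Illustrative example: with mask 0xf0 and shift 4 the largest possible key
 * is 0x0f, so any hash size of at least 16 satisfies valid_perfect_hash().
 */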

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};
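
/* Illustrative example: a request along the lines of
 *	tc filter add ... handle 2 tcindex hash 64 mask 0xfc shift 2 classid 1:2
 * arrives here as TCA_TCINDEX_HASH = 64, TCA_TCINDEX_MASK = 0xfc,
 * TCA_TCINDEX_SHIFT = 2 and TCA_TCINDEX_CLASSID = 0x10002; the attributes
 * are validated against this policy before tcindex_set_parms() applies them.
 */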

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp);

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	if (p->perfect)
		tcindex_free_perfect_hash(p);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}
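
	/* Illustrative example: mask 0xf000 with shift 12 bounds the key at
	 * 15, so hash becomes 16 and a perfect hash can be used; mask 0xffff
	 * with shift 0 exceeds the threshold and falls back to the 64-bucket
	 * imperfect hash.
	 */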

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;
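
	/* Illustrative example: with a 16-entry perfect hash only handles
	 * 0..15 pass the check above; handle 16 is rejected because no
	 * packet key under the current mask and shift could ever reach it.
	 */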

	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
				; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg %p\n",
	    tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}

static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");