// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchll.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
16 struct cls_mall_head
{
18 struct tcf_result res
;
21 unsigned int in_hw_count
;
22 struct tc_matchall_pcnt __percpu
*pf
;
23 struct rcu_work rwork
;
27 static int mall_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
28 struct tcf_result
*res
)
30 struct cls_mall_head
*head
= rcu_dereference_bh(tp
->root
);
35 if (tc_skip_sw(head
->flags
))
39 __this_cpu_inc(head
->pf
->rhit
);
40 return tcf_exts_exec(skb
, &head
->exts
, res
);
/* Nothing to set up per-tp; tp->root stays NULL until ->change installs
 * a filter.
 */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
48 static void __mall_destroy(struct cls_mall_head
*head
)
50 tcf_exts_destroy(&head
->exts
);
51 tcf_exts_put_net(&head
->exts
);
52 free_percpu(head
->pf
);
56 static void mall_destroy_work(struct work_struct
*work
)
58 struct cls_mall_head
*head
= container_of(to_rcu_work(work
),
66 static void mall_destroy_hw_filter(struct tcf_proto
*tp
,
67 struct cls_mall_head
*head
,
69 struct netlink_ext_ack
*extack
)
71 struct tc_cls_matchall_offload cls_mall
= {};
72 struct tcf_block
*block
= tp
->chain
->block
;
74 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
75 cls_mall
.command
= TC_CLSMATCHALL_DESTROY
;
76 cls_mall
.cookie
= cookie
;
78 tc_setup_cb_destroy(block
, tp
, TC_SETUP_CLSMATCHALL
, &cls_mall
, false,
79 &head
->flags
, &head
->in_hw_count
, true);
82 static int mall_replace_hw_filter(struct tcf_proto
*tp
,
83 struct cls_mall_head
*head
,
85 struct netlink_ext_ack
*extack
)
87 struct tc_cls_matchall_offload cls_mall
= {};
88 struct tcf_block
*block
= tp
->chain
->block
;
89 bool skip_sw
= tc_skip_sw(head
->flags
);
92 cls_mall
.rule
= flow_rule_alloc(tcf_exts_num_actions(&head
->exts
));
96 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
97 cls_mall
.command
= TC_CLSMATCHALL_REPLACE
;
98 cls_mall
.cookie
= cookie
;
100 err
= tc_setup_flow_action(&cls_mall
.rule
->action
, &head
->exts
);
102 kfree(cls_mall
.rule
);
103 mall_destroy_hw_filter(tp
, head
, cookie
, NULL
);
105 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
112 err
= tc_setup_cb_add(block
, tp
, TC_SETUP_CLSMATCHALL
, &cls_mall
,
113 skip_sw
, &head
->flags
, &head
->in_hw_count
, true);
114 tc_cleanup_flow_action(&cls_mall
.rule
->action
);
115 kfree(cls_mall
.rule
);
118 mall_destroy_hw_filter(tp
, head
, cookie
, NULL
);
122 if (skip_sw
&& !(head
->flags
& TCA_CLS_FLAGS_IN_HW
))
128 static void mall_destroy(struct tcf_proto
*tp
, bool rtnl_held
,
129 struct netlink_ext_ack
*extack
)
131 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
136 tcf_unbind_filter(tp
, &head
->res
);
138 if (!tc_skip_hw(head
->flags
))
139 mall_destroy_hw_filter(tp
, head
, (unsigned long) head
, extack
);
141 if (tcf_exts_get_net(&head
->exts
))
142 tcf_queue_work(&head
->rwork
, mall_destroy_work
);
144 __mall_destroy(head
);
147 static void *mall_get(struct tcf_proto
*tp
, u32 handle
)
149 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
151 if (head
&& head
->handle
== handle
)
157 static const struct nla_policy mall_policy
[TCA_MATCHALL_MAX
+ 1] = {
158 [TCA_MATCHALL_UNSPEC
] = { .type
= NLA_UNSPEC
},
159 [TCA_MATCHALL_CLASSID
] = { .type
= NLA_U32
},
160 [TCA_MATCHALL_FLAGS
] = { .type
= NLA_U32
},
163 static int mall_set_parms(struct net
*net
, struct tcf_proto
*tp
,
164 struct cls_mall_head
*head
,
165 unsigned long base
, struct nlattr
**tb
,
166 struct nlattr
*est
, bool ovr
,
167 struct netlink_ext_ack
*extack
)
171 err
= tcf_exts_validate(net
, tp
, tb
, est
, &head
->exts
, ovr
, true,
176 if (tb
[TCA_MATCHALL_CLASSID
]) {
177 head
->res
.classid
= nla_get_u32(tb
[TCA_MATCHALL_CLASSID
]);
178 tcf_bind_filter(tp
, &head
->res
, base
);
183 static int mall_change(struct net
*net
, struct sk_buff
*in_skb
,
184 struct tcf_proto
*tp
, unsigned long base
,
185 u32 handle
, struct nlattr
**tca
,
186 void **arg
, bool ovr
, bool rtnl_held
,
187 struct netlink_ext_ack
*extack
)
189 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
190 struct nlattr
*tb
[TCA_MATCHALL_MAX
+ 1];
191 struct cls_mall_head
*new;
195 if (!tca
[TCA_OPTIONS
])
201 err
= nla_parse_nested_deprecated(tb
, TCA_MATCHALL_MAX
,
202 tca
[TCA_OPTIONS
], mall_policy
, NULL
);
206 if (tb
[TCA_MATCHALL_FLAGS
]) {
207 flags
= nla_get_u32(tb
[TCA_MATCHALL_FLAGS
]);
208 if (!tc_flags_valid(flags
))
212 new = kzalloc(sizeof(*new), GFP_KERNEL
);
216 err
= tcf_exts_init(&new->exts
, net
, TCA_MATCHALL_ACT
, 0);
222 new->handle
= handle
;
224 new->pf
= alloc_percpu(struct tc_matchall_pcnt
);
227 goto err_alloc_percpu
;
230 err
= mall_set_parms(net
, tp
, new, base
, tb
, tca
[TCA_RATE
], ovr
,
235 if (!tc_skip_hw(new->flags
)) {
236 err
= mall_replace_hw_filter(tp
, new, (unsigned long)new,
239 goto err_replace_hw_filter
;
242 if (!tc_in_hw(new->flags
))
243 new->flags
|= TCA_CLS_FLAGS_NOT_IN_HW
;
246 rcu_assign_pointer(tp
->root
, new);
249 err_replace_hw_filter
:
251 free_percpu(new->pf
);
253 tcf_exts_destroy(&new->exts
);
259 static int mall_delete(struct tcf_proto
*tp
, void *arg
, bool *last
,
260 bool rtnl_held
, struct netlink_ext_ack
*extack
)
262 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
264 head
->deleting
= true;
269 static void mall_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
,
272 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
274 if (arg
->count
< arg
->skip
)
277 if (!head
|| head
->deleting
)
279 if (arg
->fn(tp
, head
, arg
) < 0)
285 static int mall_reoffload(struct tcf_proto
*tp
, bool add
, flow_setup_cb_t
*cb
,
286 void *cb_priv
, struct netlink_ext_ack
*extack
)
288 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
289 struct tc_cls_matchall_offload cls_mall
= {};
290 struct tcf_block
*block
= tp
->chain
->block
;
293 if (tc_skip_hw(head
->flags
))
296 cls_mall
.rule
= flow_rule_alloc(tcf_exts_num_actions(&head
->exts
));
300 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
301 cls_mall
.command
= add
?
302 TC_CLSMATCHALL_REPLACE
: TC_CLSMATCHALL_DESTROY
;
303 cls_mall
.cookie
= (unsigned long)head
;
305 err
= tc_setup_flow_action(&cls_mall
.rule
->action
, &head
->exts
);
307 kfree(cls_mall
.rule
);
308 if (add
&& tc_skip_sw(head
->flags
)) {
309 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
315 err
= tc_setup_cb_reoffload(block
, tp
, add
, cb
, TC_SETUP_CLSMATCHALL
,
316 &cls_mall
, cb_priv
, &head
->flags
,
318 tc_cleanup_flow_action(&cls_mall
.rule
->action
);
319 kfree(cls_mall
.rule
);
327 static void mall_stats_hw_filter(struct tcf_proto
*tp
,
328 struct cls_mall_head
*head
,
329 unsigned long cookie
)
331 struct tc_cls_matchall_offload cls_mall
= {};
332 struct tcf_block
*block
= tp
->chain
->block
;
334 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, NULL
);
335 cls_mall
.command
= TC_CLSMATCHALL_STATS
;
336 cls_mall
.cookie
= cookie
;
338 tc_setup_cb_call(block
, TC_SETUP_CLSMATCHALL
, &cls_mall
, false, true);
340 tcf_exts_stats_update(&head
->exts
, cls_mall
.stats
.bytes
,
341 cls_mall
.stats
.pkts
, cls_mall
.stats
.lastused
,
342 cls_mall
.stats
.used_hw_stats
,
343 cls_mall
.stats
.used_hw_stats_valid
);
346 static int mall_dump(struct net
*net
, struct tcf_proto
*tp
, void *fh
,
347 struct sk_buff
*skb
, struct tcmsg
*t
, bool rtnl_held
)
349 struct tc_matchall_pcnt gpf
= {};
350 struct cls_mall_head
*head
= fh
;
357 if (!tc_skip_hw(head
->flags
))
358 mall_stats_hw_filter(tp
, head
, (unsigned long)head
);
360 t
->tcm_handle
= head
->handle
;
362 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
364 goto nla_put_failure
;
366 if (head
->res
.classid
&&
367 nla_put_u32(skb
, TCA_MATCHALL_CLASSID
, head
->res
.classid
))
368 goto nla_put_failure
;
370 if (head
->flags
&& nla_put_u32(skb
, TCA_MATCHALL_FLAGS
, head
->flags
))
371 goto nla_put_failure
;
373 for_each_possible_cpu(cpu
) {
374 struct tc_matchall_pcnt
*pf
= per_cpu_ptr(head
->pf
, cpu
);
376 gpf
.rhit
+= pf
->rhit
;
379 if (nla_put_64bit(skb
, TCA_MATCHALL_PCNT
,
380 sizeof(struct tc_matchall_pcnt
),
381 &gpf
, TCA_MATCHALL_PAD
))
382 goto nla_put_failure
;
384 if (tcf_exts_dump(skb
, &head
->exts
))
385 goto nla_put_failure
;
387 nla_nest_end(skb
, nest
);
389 if (tcf_exts_dump_stats(skb
, &head
->exts
) < 0)
390 goto nla_put_failure
;
395 nla_nest_cancel(skb
, nest
);
399 static void mall_bind_class(void *fh
, u32 classid
, unsigned long cl
, void *q
,
402 struct cls_mall_head
*head
= fh
;
404 if (head
&& head
->res
.classid
== classid
) {
406 __tcf_bind_filter(q
, &head
->res
, base
);
408 __tcf_unbind_filter(q
, &head
->res
);
412 static struct tcf_proto_ops cls_mall_ops __read_mostly
= {
414 .classify
= mall_classify
,
416 .destroy
= mall_destroy
,
418 .change
= mall_change
,
419 .delete = mall_delete
,
421 .reoffload
= mall_reoffload
,
423 .bind_class
= mall_bind_class
,
424 .owner
= THIS_MODULE
,
427 static int __init
cls_mall_init(void)
429 return register_tcf_proto_ops(&cls_mall_ops
);
432 static void __exit
cls_mall_exit(void)
434 unregister_tcf_proto_ops(&cls_mall_ops
);
437 module_init(cls_mall_init
);
438 module_exit(cls_mall_exit
);
440 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
441 MODULE_DESCRIPTION("Match-all classifier");
442 MODULE_LICENSE("GPL v2");