// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

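/* A single matchall instance per classifier: the head holds the attached
 * actions, the bound class, the handle, offload flags and a per-CPU hit
 * counter.
 */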
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
};

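/* Every packet matches: bail out only when no filter is installed yet or
 * the filter is software-skipped, bump the per-CPU hit counter and run the
 * attached actions.
 */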
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

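/* Ask the offload callbacks of the owning block to remove the hardware
 * rule identified by the cookie and drop the block's offload count.
 */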
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
	tcf_block_offload_dec(block, &head->flags);
}

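/* Offload the filter: translate the attached tc actions into a flow_rule
 * and pass it to the block callbacks. With skip_sw set, failure to install
 * the rule in hardware is fatal; otherwise the software path keeps working.
 */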
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;

		return err;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
	kfree(cls_mall.rule);

	if (err < 0) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	} else if (err > 0) {
		head->in_hw_count = err;
		tcf_block_offload_inc(block, &head->flags);
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

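/* Create the single matchall filter for this classifier instance. Only one
 * filter may exist per tcf_proto, so a second change request fails with
 * -EEXIST.
 */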
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

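/* Replay (or remove) the filter on a single offload callback, used when a
 * block is bound to or unbound from a device after filters were installed.
 */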
static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		if (add && tc_skip_sw(head->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
	kfree(cls_mall.rule);

	if (err) {
		if (add && tc_skip_sw(head->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);

	return 0;
}

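/* Pull hardware counters from the offload callbacks and fold them into the
 * filter's action statistics.
 */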
static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);

	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.lastused);
}

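/* Dump the filter to netlink, aggregating the per-CPU software hit counter
 * across all possible CPUs.
 */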
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid)
		head->res.class = cl;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");