1 // SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/percpu.h>
13 #include <net/sch_generic.h>
14 #include <net/pkt_cls.h>
16 struct cls_mall_head
{
18 struct tcf_result res
;
21 unsigned int in_hw_count
;
22 struct tc_matchall_pcnt __percpu
*pf
;
23 struct rcu_work rwork
;
27 static int mall_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
28 struct tcf_result
*res
)
30 struct cls_mall_head
*head
= rcu_dereference_bh(tp
->root
);
35 if (tc_skip_sw(head
->flags
))
39 __this_cpu_inc(head
->pf
->rhit
);
40 return tcf_exts_exec(skb
, &head
->exts
, res
);
/* Nothing to set up per-tp; the single filter is created lazily in
 * mall_change().
 */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
48 static void __mall_destroy(struct cls_mall_head
*head
)
50 tcf_exts_destroy(&head
->exts
);
51 tcf_exts_put_net(&head
->exts
);
52 free_percpu(head
->pf
);
56 static void mall_destroy_work(struct work_struct
*work
)
58 struct cls_mall_head
*head
= container_of(to_rcu_work(work
),
66 static void mall_destroy_hw_filter(struct tcf_proto
*tp
,
67 struct cls_mall_head
*head
,
69 struct netlink_ext_ack
*extack
)
71 struct tc_cls_matchall_offload cls_mall
= {};
72 struct tcf_block
*block
= tp
->chain
->block
;
74 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
75 cls_mall
.command
= TC_CLSMATCHALL_DESTROY
;
76 cls_mall
.cookie
= cookie
;
78 tc_setup_cb_destroy(block
, tp
, TC_SETUP_CLSMATCHALL
, &cls_mall
, false,
79 &head
->flags
, &head
->in_hw_count
, true);
82 static int mall_replace_hw_filter(struct tcf_proto
*tp
,
83 struct cls_mall_head
*head
,
85 struct netlink_ext_ack
*extack
)
87 struct tc_cls_matchall_offload cls_mall
= {};
88 struct tcf_block
*block
= tp
->chain
->block
;
89 bool skip_sw
= tc_skip_sw(head
->flags
);
92 cls_mall
.rule
= flow_rule_alloc(tcf_exts_num_actions(&head
->exts
));
96 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
97 cls_mall
.command
= TC_CLSMATCHALL_REPLACE
;
98 cls_mall
.cookie
= cookie
;
100 err
= tc_setup_flow_action(&cls_mall
.rule
->action
, &head
->exts
, true);
102 kfree(cls_mall
.rule
);
103 mall_destroy_hw_filter(tp
, head
, cookie
, NULL
);
105 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
112 err
= tc_setup_cb_add(block
, tp
, TC_SETUP_CLSMATCHALL
, &cls_mall
,
113 skip_sw
, &head
->flags
, &head
->in_hw_count
, true);
114 tc_cleanup_flow_action(&cls_mall
.rule
->action
);
115 kfree(cls_mall
.rule
);
118 mall_destroy_hw_filter(tp
, head
, cookie
, NULL
);
122 if (skip_sw
&& !(head
->flags
& TCA_CLS_FLAGS_IN_HW
))
128 static void mall_destroy(struct tcf_proto
*tp
, bool rtnl_held
,
129 struct netlink_ext_ack
*extack
)
131 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
136 tcf_unbind_filter(tp
, &head
->res
);
138 if (!tc_skip_hw(head
->flags
))
139 mall_destroy_hw_filter(tp
, head
, (unsigned long) head
, extack
);
141 if (tcf_exts_get_net(&head
->exts
))
142 tcf_queue_work(&head
->rwork
, mall_destroy_work
);
144 __mall_destroy(head
);
147 static void *mall_get(struct tcf_proto
*tp
, u32 handle
)
149 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
151 if (head
&& head
->handle
== handle
)
157 static const struct nla_policy mall_policy
[TCA_MATCHALL_MAX
+ 1] = {
158 [TCA_MATCHALL_UNSPEC
] = { .type
= NLA_UNSPEC
},
159 [TCA_MATCHALL_CLASSID
] = { .type
= NLA_U32
},
162 static int mall_set_parms(struct net
*net
, struct tcf_proto
*tp
,
163 struct cls_mall_head
*head
,
164 unsigned long base
, struct nlattr
**tb
,
165 struct nlattr
*est
, bool ovr
,
166 struct netlink_ext_ack
*extack
)
170 err
= tcf_exts_validate(net
, tp
, tb
, est
, &head
->exts
, ovr
, true,
175 if (tb
[TCA_MATCHALL_CLASSID
]) {
176 head
->res
.classid
= nla_get_u32(tb
[TCA_MATCHALL_CLASSID
]);
177 tcf_bind_filter(tp
, &head
->res
, base
);
182 static int mall_change(struct net
*net
, struct sk_buff
*in_skb
,
183 struct tcf_proto
*tp
, unsigned long base
,
184 u32 handle
, struct nlattr
**tca
,
185 void **arg
, bool ovr
, bool rtnl_held
,
186 struct netlink_ext_ack
*extack
)
188 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
189 struct nlattr
*tb
[TCA_MATCHALL_MAX
+ 1];
190 struct cls_mall_head
*new;
194 if (!tca
[TCA_OPTIONS
])
200 err
= nla_parse_nested_deprecated(tb
, TCA_MATCHALL_MAX
,
201 tca
[TCA_OPTIONS
], mall_policy
, NULL
);
205 if (tb
[TCA_MATCHALL_FLAGS
]) {
206 flags
= nla_get_u32(tb
[TCA_MATCHALL_FLAGS
]);
207 if (!tc_flags_valid(flags
))
211 new = kzalloc(sizeof(*new), GFP_KERNEL
);
215 err
= tcf_exts_init(&new->exts
, net
, TCA_MATCHALL_ACT
, 0);
221 new->handle
= handle
;
223 new->pf
= alloc_percpu(struct tc_matchall_pcnt
);
226 goto err_alloc_percpu
;
229 err
= mall_set_parms(net
, tp
, new, base
, tb
, tca
[TCA_RATE
], ovr
,
234 if (!tc_skip_hw(new->flags
)) {
235 err
= mall_replace_hw_filter(tp
, new, (unsigned long)new,
238 goto err_replace_hw_filter
;
241 if (!tc_in_hw(new->flags
))
242 new->flags
|= TCA_CLS_FLAGS_NOT_IN_HW
;
245 rcu_assign_pointer(tp
->root
, new);
248 err_replace_hw_filter
:
250 free_percpu(new->pf
);
252 tcf_exts_destroy(&new->exts
);
258 static int mall_delete(struct tcf_proto
*tp
, void *arg
, bool *last
,
259 bool rtnl_held
, struct netlink_ext_ack
*extack
)
261 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
263 head
->deleting
= true;
268 static void mall_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
,
271 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
273 if (arg
->count
< arg
->skip
)
276 if (!head
|| head
->deleting
)
278 if (arg
->fn(tp
, head
, arg
) < 0)
284 static int mall_reoffload(struct tcf_proto
*tp
, bool add
, flow_setup_cb_t
*cb
,
285 void *cb_priv
, struct netlink_ext_ack
*extack
)
287 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
288 struct tc_cls_matchall_offload cls_mall
= {};
289 struct tcf_block
*block
= tp
->chain
->block
;
292 if (tc_skip_hw(head
->flags
))
295 cls_mall
.rule
= flow_rule_alloc(tcf_exts_num_actions(&head
->exts
));
299 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, extack
);
300 cls_mall
.command
= add
?
301 TC_CLSMATCHALL_REPLACE
: TC_CLSMATCHALL_DESTROY
;
302 cls_mall
.cookie
= (unsigned long)head
;
304 err
= tc_setup_flow_action(&cls_mall
.rule
->action
, &head
->exts
, true);
306 kfree(cls_mall
.rule
);
307 if (add
&& tc_skip_sw(head
->flags
)) {
308 NL_SET_ERR_MSG_MOD(extack
, "Failed to setup flow action");
314 err
= tc_setup_cb_reoffload(block
, tp
, add
, cb
, TC_SETUP_CLSMATCHALL
,
315 &cls_mall
, cb_priv
, &head
->flags
,
317 tc_cleanup_flow_action(&cls_mall
.rule
->action
);
318 kfree(cls_mall
.rule
);
326 static void mall_stats_hw_filter(struct tcf_proto
*tp
,
327 struct cls_mall_head
*head
,
328 unsigned long cookie
)
330 struct tc_cls_matchall_offload cls_mall
= {};
331 struct tcf_block
*block
= tp
->chain
->block
;
333 tc_cls_common_offload_init(&cls_mall
.common
, tp
, head
->flags
, NULL
);
334 cls_mall
.command
= TC_CLSMATCHALL_STATS
;
335 cls_mall
.cookie
= cookie
;
337 tc_setup_cb_call(block
, TC_SETUP_CLSMATCHALL
, &cls_mall
, false, true);
339 tcf_exts_stats_update(&head
->exts
, cls_mall
.stats
.bytes
,
340 cls_mall
.stats
.pkts
, cls_mall
.stats
.lastused
);
343 static int mall_dump(struct net
*net
, struct tcf_proto
*tp
, void *fh
,
344 struct sk_buff
*skb
, struct tcmsg
*t
, bool rtnl_held
)
346 struct tc_matchall_pcnt gpf
= {};
347 struct cls_mall_head
*head
= fh
;
354 if (!tc_skip_hw(head
->flags
))
355 mall_stats_hw_filter(tp
, head
, (unsigned long)head
);
357 t
->tcm_handle
= head
->handle
;
359 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
361 goto nla_put_failure
;
363 if (head
->res
.classid
&&
364 nla_put_u32(skb
, TCA_MATCHALL_CLASSID
, head
->res
.classid
))
365 goto nla_put_failure
;
367 if (head
->flags
&& nla_put_u32(skb
, TCA_MATCHALL_FLAGS
, head
->flags
))
368 goto nla_put_failure
;
370 for_each_possible_cpu(cpu
) {
371 struct tc_matchall_pcnt
*pf
= per_cpu_ptr(head
->pf
, cpu
);
373 gpf
.rhit
+= pf
->rhit
;
376 if (nla_put_64bit(skb
, TCA_MATCHALL_PCNT
,
377 sizeof(struct tc_matchall_pcnt
),
378 &gpf
, TCA_MATCHALL_PAD
))
379 goto nla_put_failure
;
381 if (tcf_exts_dump(skb
, &head
->exts
))
382 goto nla_put_failure
;
384 nla_nest_end(skb
, nest
);
386 if (tcf_exts_dump_stats(skb
, &head
->exts
) < 0)
387 goto nla_put_failure
;
392 nla_nest_cancel(skb
, nest
);
396 static void mall_bind_class(void *fh
, u32 classid
, unsigned long cl
)
398 struct cls_mall_head
*head
= fh
;
400 if (head
&& head
->res
.classid
== classid
)
401 head
->res
.class = cl
;
404 static struct tcf_proto_ops cls_mall_ops __read_mostly
= {
406 .classify
= mall_classify
,
408 .destroy
= mall_destroy
,
410 .change
= mall_change
,
411 .delete = mall_delete
,
413 .reoffload
= mall_reoffload
,
415 .bind_class
= mall_bind_class
,
416 .owner
= THIS_MODULE
,
419 static int __init
cls_mall_init(void)
421 return register_tcf_proto_ops(&cls_mall_ops
);
424 static void __exit
cls_mall_exit(void)
426 unregister_tcf_proto_ops(&cls_mall_ops
);
429 module_init(cls_mall_init
);
430 module_exit(cls_mall_exit
);
432 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
433 MODULE_DESCRIPTION("Match-all classifier");
434 MODULE_LICENSE("GPL v2");