/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
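
/* Usage sketch (illustrative, not part of this file): a caller that does
 * want hardware stats for a given action can override the DONT_CARE
 * default after allocation. The two-action rule below is hypothetical.
 *
 *	struct flow_rule *rule = flow_rule_alloc(2);
 *
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_DROP;
 *	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
 */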
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
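
/* Usage sketch (illustrative, not part of this file): drivers consuming an
 * offloaded rule normally test flow_rule_match_key() for a dissector key
 * before calling the matching helper; key/mask then point into the rule's
 * dissector storage, so no copy is made.
 *
 *	struct flow_match_ipv4_addrs ipv4;
 *	struct flow_match_basic basic;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &basic);
 *		// e.g. program basic.key->ip_proto under basic.mask->ip_proto
 *	}
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
 *		flow_rule_match_ipv4_addrs(rule, &ipv4);
 *		// e.g. program ipv4.key->src under ipv4.mask->src
 *	}
 */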
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;

	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);

	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
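
/* Usage sketch (illustrative, not part of this file): the cookie is a flat
 * copy of caller-provided data, so creation and destruction pair directly:
 *
 *	struct flow_action_cookie *cookie;
 *
 *	cookie = flow_action_cookie_create(data, len, GFP_KERNEL);
 *	if (!cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(cookie);
 */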
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);
void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
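
/* Usage sketch (illustrative, not part of this file): a driver with a single
 * flow_setup_cb_t can route its whole TC block setup through this helper.
 * The list and callback names below are hypothetical driver code.
 *
 *	static LIST_HEAD(mydrv_block_cb_list);
 *
 *	static int mydrv_setup_tc_block(struct net_device *dev,
 *					struct flow_block_offload *f)
 *	{
 *		return flow_block_cb_setup_simple(f, &mydrv_block_cb_list,
 *						  mydrv_setup_tc_block_cb,
 *						  dev, dev, true);
 *	}
 */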
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	mutex_unlock(&flow_indr_block_lock);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
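
/* Usage sketch (illustrative, not part of this file): a driver that wants
 * blocks bound on foreign devices (e.g. tunnel netdevs) registers one
 * indirect callback, typically at module init, and unregisters it with a
 * matching release hook on exit. The names below are hypothetical.
 *
 *	err = flow_indr_dev_register(mydrv_indr_setup_cb, mydrv);
 *	if (err)
 *		return err;
 *	...
 *	flow_indr_dev_unregister(mydrv_indr_setup_cb, mydrv,
 *				 mydrv_indr_release);
 */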
static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(this, &flow_block_indr_dev_list, list)
		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);

	mutex_unlock(&flow_indr_block_lock);

	return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
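
/* Caller sketch (illustrative, not part of this file): subsystems such as
 * TC fall back to the indirect path when the device has no direct offload
 * hook, roughly along these lines:
 *
 *	if (!dev->netdev_ops->ndo_setup_tc)
 *		return flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK,
 *						   data, bo, cleanup);
 */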