/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
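
/* Illustrative use, not part of this file: a caller that does want
 * hardware counters for an action overrides the DONT_CARE default,
 * e.g. (hypothetical one-action rule):
 *
 *	struct flow_rule *rule = flow_rule_alloc(1);
 *
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].id = FLOW_ACTION_DROP;
 *	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
 */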

struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}
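
/* Shared body for the flow_rule_match_*() helpers below: resolves the
 * key and mask pointers for one dissector key type out of rule->match
 * and stores them in *__out.
 */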
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_arp(const struct flow_rule *rule,
			 struct flow_match_arp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out);
}
EXPORT_SYMBOL(flow_rule_match_arp);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_ipsec(const struct flow_rule *rule,
			   struct flow_match_ipsec *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out);
}
EXPORT_SYMBOL(flow_rule_match_ipsec);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
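
/* Illustrative use in a hypothetical driver's FLOW_CLS_REPLACE handler:
 *
 *	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		... match.key->n_proto and match.mask->n_proto now hold
 *		the EtherType value and its mask ...
 *	}
 */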

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
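
/* Illustrative use, with hypothetical driver names: an ingress-only
 * driver can route its TC block requests straight through this helper
 * from .ndo_setup_tc:
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	case TC_SETUP_BLOCK:
 *		return flow_block_cb_setup_simple(type_data,
 *						  &foo_block_cb_list,
 *						  foo_setup_tc_block_cb,
 *						  foo_priv, foo_priv, true);
 */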

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);
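
/* Roles of the lists above: flow_block_indr_list holds indirectly set up
 * flow_block_cbs, flow_block_indr_dev_list holds the drivers registered
 * for indirect binds, and flow_indir_dev_list records the blocks seen so
 * far so they can be replayed to drivers that register later.
 */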

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}
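
/* A driver registers below once per (cb, cb_priv) pair; repeated calls
 * only take an extra reference. existing_qdiscs_register() replays the
 * blocks that were installed before the driver appeared, so late
 * registrants still get a chance to bind to them.
 */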

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
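
/* Note that flow_block_indr_notify() above runs only after
 * flow_indr_block_lock has been dropped, so the per-block ->cleanup()
 * callbacks can call back into flow offload code without risking a
 * deadlock on the mutex.
 */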

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}
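
/* The function below returns -EOPNOTSUPP when a request carrying a
 * flow_block_offload (bo != NULL) found no taker, i.e. no registered
 * callback attached anything to bo->cb_list; otherwise it returns the
 * number of registered drivers whose callback accepted the request.
 */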
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);