#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <net/flow_dissector.h>
#include <linux/rhashtable.h>

struct flow_match {
	struct flow_dissector	*dissector;
	void			*mask;
	void			*key;
};
struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_rule;
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
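
/*
 * Example (illustrative sketch, not part of this header): a driver's
 * classifier offload handler typically checks which keys are present in the
 * rule and then extracts them with the flow_rule_match_*() helpers. The
 * function name example_parse_l4() below is hypothetical.
 *
 *	static void example_parse_l4(const struct flow_rule *rule)
 *	{
 *		struct flow_match_basic basic;
 *		struct flow_match_ports ports;
 *
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *			flow_rule_match_basic(rule, &basic);
 *			// basic.key->ip_proto, valid under basic.mask->ip_proto
 *		}
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
 *			flow_rule_match_ports(rule, &ports);
 *			// ports.key->src / ports.key->dst, masked by ports.mask
 *		}
 *	}
 */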
enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
};
/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};
typedef void (*action_destr)(void *priv);

struct flow_action_entry {
	enum flow_action_id		id;
	action_destr			destructor;
	void				*destructor_priv;
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
		struct {				/* FLOW_ACTION_PACKET_EDIT */
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
		struct ip_tunnel_info	*tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
		u16			ptype;		/* FLOW_ACTION_PTYPE */
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
		struct {				/* FLOW_ACTION_POLICE */
			s64			burst;
			u64			rate_bytes_ps;
		} police;
		struct {				/* FLOW_ACTION_CT */
			int		action;
			u16		zone;
		} ct;
		struct {				/* FLOW_ACTION_MPLS_PUSH */
			u32		label;
			__be16		proto;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_push;
		struct {				/* FLOW_ACTION_MPLS_POP */
			__be16		proto;
		} mpls_pop;
		struct {				/* FLOW_ACTION_MPLS_MANGLE */
			u32		label;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_mangle;
	};
};

struct flow_action {
	unsigned int			num_entries;
	struct flow_action_entry	entries[0];
};
static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
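
/*
 * Example (illustrative sketch, not part of this header): walking the action
 * list of an offloaded rule with flow_action_for_each(). The switch covers
 * only a subset of action ids, and example_apply_actions() is a hypothetical
 * driver helper.
 *
 *	static int example_apply_actions(const struct flow_action *actions)
 *	{
 *		const struct flow_action_entry *act;
 *		int i;
 *
 *		flow_action_for_each(i, act, actions) {
 *			switch (act->id) {
 *			case FLOW_ACTION_ACCEPT:
 *				break;
 *			case FLOW_ACTION_REDIRECT:
 *				// act->dev is the target net_device
 *				break;
 *			default:
 *				return -EOPNOTSUPP;
 *			}
 *		}
 *		return 0;
 *	}
 */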
struct flow_rule {
	struct flow_match	match;
	struct flow_action	action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}
struct flow_stats {
	u64	pkts;
	u64	bytes;
	u64	lastused;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts, u64 lastused)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);
}
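
/*
 * Example (illustrative sketch, not part of this header): a driver answering
 * a FLOW_CLS_STATS request folds its hardware counters into the request with
 * flow_stats_update(). The counter variables below are hypothetical
 * placeholders for values read back from hardware.
 *
 *	static int example_flow_stats(struct flow_cls_offload *f)
 *	{
 *		u64 bytes = 0, pkts = 0, lastused = 0;
 *
 *		// look up the hardware counters for f->cookie here ...
 *		flow_stats_update(&f->stats, bytes, pkts, lastused);
 *		return 0;
 *	}
 */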
enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct flow_block {
	struct list_head cb_list;
};
struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	bool unlocked_driver_cb;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
};

typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);
struct flow_block_cb {
	struct list_head	driver_list;
	struct list_head	list;
	flow_setup_cb_t		*cb;
	void			*cb_ident;
	void			*cb_priv;
	void			(*release)(void *cb_priv);
	unsigned int		refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);
static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);
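
/*
 * Example (illustrative sketch, not part of this header): how a driver's
 * ndo_setup_tc(TC_SETUP_BLOCK) path typically binds and unbinds a block
 * callback by hand, when flow_block_cb_setup_simple() is not sufficient.
 * The names example_block_cb_list, example_setup_cb, example_priv and priv
 * are hypothetical.
 *
 *	static LIST_HEAD(example_block_cb_list);
 *
 *	static int example_setup_block(struct example_priv *priv,
 *				       struct flow_block_offload *f)
 *	{
 *		struct flow_block_cb *block_cb;
 *
 *		switch (f->command) {
 *		case FLOW_BLOCK_BIND:
 *			if (flow_block_cb_is_busy(example_setup_cb, priv,
 *						  &example_block_cb_list))
 *				return -EBUSY;
 *			block_cb = flow_block_cb_alloc(example_setup_cb, priv,
 *						       priv, NULL);
 *			if (IS_ERR(block_cb))
 *				return PTR_ERR(block_cb);
 *			flow_block_cb_add(block_cb, f);
 *			list_add_tail(&block_cb->driver_list,
 *				      &example_block_cb_list);
 *			return 0;
 *		case FLOW_BLOCK_UNBIND:
 *			block_cb = flow_block_cb_lookup(f->block,
 *							example_setup_cb, priv);
 *			if (!block_cb)
 *				return -ENOENT;
 *			flow_block_cb_remove(block_cb, f);
 *			list_del(&block_cb->driver_list);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */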
enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};
struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}
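
/*
 * Example (illustrative sketch, not part of this header): the flow_setup_cb_t
 * a driver registers for flower offload usually dispatches on the
 * flow_cls_offload command and pulls the match/action description out with
 * flow_cls_offload_flow_rule(). example_add_flow() and example_del_flow()
 * are hypothetical driver helpers.
 *
 *	static int example_setup_cb(enum tc_setup_type type, void *type_data,
 *				    void *cb_priv)
 *	{
 *		struct flow_cls_offload *f = type_data;
 *		struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 *
 *		if (type != TC_SETUP_CLSFLOWER)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return example_add_flow(cb_priv, f->cookie, rule);
 *		case FLOW_CLS_DESTROY:
 *			return example_del_flow(cb_priv, f->cookie);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */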
static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
				      enum tc_setup_type type, void *type_data);

typedef void flow_indr_block_cmd_t(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb, void *cb_priv,
				   enum flow_block_command command);

struct flow_indr_block_entry {
	flow_indr_block_cmd_t *cb;
	struct list_head	list;
};

void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);

void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_ident);

void __flow_indr_block_cb_unregister(struct net_device *dev,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_ident);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				flow_indr_block_bind_cb_t *cb, void *cb_ident);

void flow_indr_block_cb_unregister(struct net_device *dev,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_ident);

void flow_indr_block_call(struct net_device *dev,
			  struct flow_block_offload *bo,
			  enum flow_block_command command);
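
/*
 * Example (illustrative sketch, not part of this header): a driver that wants
 * offload requests for blocks attached to foreign devices (e.g. tunnel
 * netdevs) registers an indirect block callback against that device. The
 * names example_indr_setup_cb and priv are hypothetical.
 *
 *	static int example_indr_setup_cb(struct net_device *dev, void *cb_priv,
 *					 enum tc_setup_type type,
 *					 void *type_data)
 *	{
 *		if (type != TC_SETUP_BLOCK)
 *			return -EOPNOTSUPP;
 *		// type_data is a struct flow_block_offload for @dev;
 *		// bind or unbind a block callback for it here ...
 *		return 0;
 *	}
 *
 *	// at netdev-event or probe time:
 *	// flow_indr_block_cb_register(dev, priv, example_indr_setup_cb, priv);
 *	// flow_indr_block_cb_unregister(dev, example_indr_setup_cb, priv);
 */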
#endif /* _NET_FLOW_OFFLOAD_H */