// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
        (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
         TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
        (FLOW_DIS_IS_FRAGMENT | \
         FLOW_DIS_FIRST_FRAG)
#define NFP_FLOWER_WHITELIST_DISSECTOR \
        (BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_CT) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_META) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
        (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
        (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
        (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
        (NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_TP | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
        (NFP_FLOWER_LAYER_EXT_META | \
         NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)
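
/* Illustrative example of how the whitelists above are applied: a rule is
 * accepted only when every dissector key it uses is inside
 * NFP_FLOWER_WHITELIST_DISSECTOR, i.e.
 * (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) == 0.
 * A rule matching only ETH_ADDRS | IPV4_ADDRS | PORTS therefore passes,
 * while one that also matches FLOW_DISSECTOR_KEY_ARP is rejected. If any
 * tunnel key is used, the full required set (_R or _V6_R) must be present.
 */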
struct nfp_flower_merge_check {
        union {
                struct {
                        __be16 tci;
                        struct nfp_flower_mac_mpls l2;
                        struct nfp_flower_tp_ports l4;
                        union {
                                struct nfp_flower_ipv4 ipv4;
                                struct nfp_flower_ipv6 ipv6;
                        };
                };
                unsigned long vals[8];
        };
};
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
                     u8 mtype)
{
        u32 meta_len, key_len, mask_len, act_len, tot_len;
        struct sk_buff *skb;
        unsigned char *msg;

        meta_len = sizeof(struct nfp_fl_rule_metadata);
        key_len = nfp_flow->meta.key_len;
        mask_len = nfp_flow->meta.mask_len;
        act_len = nfp_flow->meta.act_len;

        tot_len = meta_len + key_len + mask_len + act_len;

        /* Convert to long words as firmware expects
         * lengths in units of NFP_FL_LW_SIZ.
         */
        nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

        skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, &nfp_flow->meta, meta_len);
        memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
        memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
        memcpy(&msg[meta_len + key_len + mask_len],
               nfp_flow->action_data, act_len);

        /* Convert back to bytes as software expects
         * lengths in units of bytes.
         */
        nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

        nfp_ctrl_tx(app->ctrl, skb);

        return 0;
}
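
/* Worked example of the length conversion above, assuming NFP_FL_LW_SIZ is 2
 * (one firmware long word == 4 bytes): a 40 byte key_len is sent to firmware
 * as 40 >> 2 == 10 long words and restored afterwards as 10 << 2 == 40 bytes,
 * so the host-side metadata always stays in byte units.
 */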
static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
{
        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
{
        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
                          u32 *key_layer_two, int *key_size, bool ipv6,
                          struct netlink_ext_ack *extack)
{
        if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
            (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
                return -EOPNOTSUPP;
        }

        if (enc_opts->len > 0) {
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
                *key_size += sizeof(struct nfp_flower_geneve_options);
        }

        return 0;
}
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
                              struct flow_dissector_key_enc_opts *enc_op,
                              u32 *key_layer_two, u8 *key_layer, int *key_size,
                              struct nfp_flower_priv *priv,
                              enum nfp_flower_tun_type *tun_type, bool ipv6,
                              struct netlink_ext_ack *extack)
{
        int err;

        switch (enc_ports->dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                *tun_type = NFP_FL_TUNNEL_VXLAN;
                *key_layer |= NFP_FLOWER_LAYER_VXLAN;

                if (ipv6) {
                        *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        *key_size += sizeof(struct nfp_flower_ext_meta);
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (enc_op) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
                        return -EOPNOTSUPP;
                }
                break;
        case htons(GENEVE_UDP_PORT):
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
                        return -EOPNOTSUPP;
                }
                *tun_type = NFP_FL_TUNNEL_GENEVE;
                *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                *key_size += sizeof(struct nfp_flower_ext_meta);
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

                if (ipv6) {
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (!enc_op)
                        break;

                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
                        return -EOPNOTSUPP;
                }
                err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
                                                ipv6, extack);
                if (err)
                        return err;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
                return -EOPNOTSUPP;
        }

        return 0;
}
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
                                struct net_device *netdev,
                                struct nfp_fl_key_ls *ret_key_ls,
                                struct flow_rule *rule,
                                enum nfp_flower_tun_type *tun_type,
                                struct netlink_ext_ack *extack)
{
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic basic = { NULL, NULL };
        struct nfp_flower_priv *priv = app->priv;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
        int err;

        if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
                return -EOPNOTSUPP;
        }

        /* If any tun dissector is used then the required set must be used. */
        if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
                return -EOPNOTSUPP;
        }

        key_layer_two = 0;
        key_layer = NFP_FLOWER_LAYER_PORT;
        key_size = sizeof(struct nfp_flower_meta_tci) +
                   sizeof(struct nfp_flower_in_port);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
            flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                key_layer |= NFP_FLOWER_LAYER_MAC;
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan vlan;

                flow_rule_match_vlan(rule, &vlan);
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
                    vlan.key->vlan_priority) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
                        return -EOPNOTSUPP;
                }
                if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
                    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_size += sizeof(struct nfp_flower_vlan);
                        key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
                struct flow_match_vlan cvlan;

                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
                        return -EOPNOTSUPP;
                }

                flow_rule_match_vlan(rule, &cvlan);
                if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_size += sizeof(struct nfp_flower_vlan);
                        key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_match_enc_opts enc_op = { NULL, NULL };
                struct flow_match_ipv4_addrs ipv4_addrs;
                struct flow_match_ipv6_addrs ipv6_addrs;
                struct flow_match_control enc_ctl;
                struct flow_match_ports enc_ports;
                bool ipv6_tun = false;

                flow_rule_match_enc_control(rule, &enc_ctl);

                if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags,
                                                    extack))
                        return -EOPNOTSUPP;

                if (enc_ctl.mask->addr_type != 0xffff) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
                        return -EOPNOTSUPP;
                }

                ipv6_tun = enc_ctl.key->addr_type ==
                                FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                if (ipv6_tun &&
                    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
                        return -EOPNOTSUPP;
                }

                if (!ipv6_tun &&
                    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
                        return -EOPNOTSUPP;
                }

                if (ipv6_tun) {
                        flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
                        if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
                                       sizeof(ipv6_addrs.mask->dst))) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
                        if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                }

                if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
                        flow_rule_match_enc_opts(rule, &enc_op);

                if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                        /* Check if GRE, which has no enc_ports */
                        if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
                                return -EOPNOTSUPP;
                        }

                        *tun_type = NFP_FL_TUNNEL_GRE;
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_layer_two |= NFP_FLOWER_LAYER2_GRE;

                        if (ipv6_tun) {
                                key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                                key_size +=
                                        sizeof(struct nfp_flower_ipv6_gre_tun);
                        } else {
                                key_size +=
                                        sizeof(struct nfp_flower_ipv4_gre_tun);
                        }

                        if (enc_op.key) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ports(rule, &enc_ports);
                        if (enc_ports.mask->dst != cpu_to_be16(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
                                return -EOPNOTSUPP;
                        }

                        err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
                                                            enc_op.key,
                                                            &key_layer_two,
                                                            &key_layer,
                                                            &key_size, priv,
                                                            tun_type, ipv6_tun,
                                                            extack);
                        if (err)
                                return err;

                        /* Ensure the ingress netdev matches the expected
                         * tunnel type.
                         */
                        if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
                flow_rule_match_basic(rule, &basic);

        if (basic.mask && basic.mask->n_proto) {
                /* Ethernet type is present in the key. */
                switch (basic.key->n_proto) {
                case cpu_to_be16(ETH_P_IP):
                        key_layer |= NFP_FLOWER_LAYER_IPV4;
                        key_size += sizeof(struct nfp_flower_ipv4);
                        break;

                case cpu_to_be16(ETH_P_IPV6):
                        key_layer |= NFP_FLOWER_LAYER_IPV6;
                        key_size += sizeof(struct nfp_flower_ipv6);
                        break;

                /* Currently we do not offload ARP
                 * because we rely on it to get to the host.
                 */
                case cpu_to_be16(ETH_P_ARP):
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
                        return -EOPNOTSUPP;

                case cpu_to_be16(ETH_P_MPLS_UC):
                case cpu_to_be16(ETH_P_MPLS_MC):
                        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                                key_layer |= NFP_FLOWER_LAYER_MAC;
                                key_size += sizeof(struct nfp_flower_mac_mpls);
                        }
                        break;

                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;

                default:
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
                        return -EOPNOTSUPP;
                }
        } else if (nfp_flower_check_higher_than_mac(rule)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
                return -EOPNOTSUPP;
        }

        if (basic.mask && basic.mask->ip_proto) {
                switch (basic.key->ip_proto) {
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                case IPPROTO_SCTP:
                case IPPROTO_ICMP:
                case IPPROTO_ICMPV6:
                        key_layer |= NFP_FLOWER_LAYER_TP;
                        key_size += sizeof(struct nfp_flower_tp_ports);
                        break;
                }
        }

        if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
            nfp_flower_check_higher_than_l3(rule)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_match_tcp tcp;
                u32 tcp_flags;

                flow_rule_match_tcp(rule, &tcp);
                tcp_flags = be16_to_cpu(tcp.key->flags);

                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
                        return -EOPNOTSUPP;
                }

                /* We only support PSH and URG flags when either
                 * FIN, SYN or RST is present as well.
                 */
                if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
                        return -EOPNOTSUPP;
                }

                /* We need to store TCP flags in either the IPv4 or IPv6 key
                 * space, thus we need to ensure we include an IPv4/IPv6 key
                 * layer if we have not done so already.
                 */
                if (!basic.key) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
                        return -EOPNOTSUPP;
                }

                if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
                    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                        switch (basic.key->n_proto) {
                        case cpu_to_be16(ETH_P_IP):
                                key_layer |= NFP_FLOWER_LAYER_IPV4;
                                key_size += sizeof(struct nfp_flower_ipv4);
                                break;

                        case cpu_to_be16(ETH_P_IPV6):
                                key_layer |= NFP_FLOWER_LAYER_IPV6;
                                key_size += sizeof(struct nfp_flower_ipv6);
                                break;

                        default:
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control ctl;

                flow_rule_match_control(rule, &ctl);

                if (!flow_rule_is_supp_control_flags(NFP_FLOWER_SUPPORTED_CTLFLAGS,
                                                     ctl.mask->flags, extack))
                        return -EOPNOTSUPP;
        }

        ret_key_ls->key_layer = key_layer;
        ret_key_ls->key_layer_two = key_layer_two;
        ret_key_ls->key_size = key_size;

        return 0;
}
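
/* The key layer map and byte count computed above drive the rest of the
 * offload path: nfp_flower_allocate_new() sizes the key and mask buffers
 * from ret_key_ls->key_size, and the match compile stage emits one
 * sub-structure (meta/TCI, in_port, MAC/MPLS, IPv4/IPv6, TP, tunnel) for
 * each bit set in key_layer/key_layer_two.
 */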
struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
        struct nfp_fl_payload *flow_pay;

        flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
        if (!flow_pay)
                return NULL;

        flow_pay->meta.key_len = key_layer->key_size;
        flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->unmasked_data)
                goto err_free_flow;

        flow_pay->meta.mask_len = key_layer->key_size;
        flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->mask_data)
                goto err_free_unmasked;

        flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
        if (!flow_pay->action_data)
                goto err_free_mask;

        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->nfp_tun_ipv6 = NULL;
        flow_pay->meta.flags = 0;
        INIT_LIST_HEAD(&flow_pay->linked_flows);
        flow_pay->in_hw = false;
        flow_pay->pre_tun_rule.dev = NULL;

        return flow_pay;

err_free_mask:
        kfree(flow_pay->mask_data);
err_free_unmasked:
        kfree(flow_pay->unmasked_data);
err_free_flow:
        kfree(flow_pay);
        return NULL;
}
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
                                     struct nfp_flower_merge_check *merge,
                                     u8 *last_act_id, int *act_out)
{
        struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
        struct nfp_fl_set_ip4_addrs *ipv4_add;
        struct nfp_fl_set_ipv6_addr *ipv6_add;
        struct nfp_fl_push_vlan *push_vlan;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_set_tport *tport;
        struct nfp_fl_set_eth *eth;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;
        bool ipv6_tun = false;
        u8 act_id = 0;
        u8 *ports;
        int i;

        while (act_off < flow->meta.act_len) {
                a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_OUTPUT:
                        if (act_out)
                                (*act_out)++;
                        break;
                case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
                        push_vlan = (struct nfp_fl_push_vlan *)a;
                        if (push_vlan->vlan_tci)
                                merge->tci = cpu_to_be16(0xffff);
                        break;
                case NFP_FL_ACTION_OPCODE_POP_VLAN:
                        merge->tci = cpu_to_be16(0);
                        break;
                case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
                        /* New tunnel header means l2 to l4 can be matched. */
                        eth_broadcast_addr(&merge->l2.mac_dst[0]);
                        eth_broadcast_addr(&merge->l2.mac_src[0]);
                        memset(&merge->l4, 0xff,
                               sizeof(struct nfp_flower_tp_ports));
                        if (ipv6_tun)
                                memset(&merge->ipv6, 0xff,
                                       sizeof(struct nfp_flower_ipv6));
                        else
                                memset(&merge->ipv4, 0xff,
                                       sizeof(struct nfp_flower_ipv4));
                        break;
                case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
                        eth = (struct nfp_fl_set_eth *)a;
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_src[i] |=
                                        eth->eth_addr_mask[ETH_ALEN + i];
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
                        ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
                        merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
                        merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
                        ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
                        merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
                        merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
                        ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
                        merge->ipv6.ip_ext.ttl |=
                                ipv6_tc_hl_fl->ipv6_hop_limit_mask;
                        merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
                        merge->ipv6.ipv6_flow_label_exthdr |=
                                ipv6_tc_hl_fl->ipv6_label_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_UDP:
                case NFP_FL_ACTION_OPCODE_SET_TCP:
                        tport = (struct nfp_fl_set_tport *)a;
                        ports = (u8 *)&merge->l4.port_src;
                        for (i = 0; i < 4; i++)
                                ports[i] |= tport->tp_port_mask[i];
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        pre_tun = (struct nfp_fl_pre_tunnel *)a;
                        ipv6_tun = be16_to_cpu(pre_tun->flags) &
                                        NFP_FL_PRE_TUN_IPV6;
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        if (last_act_id)
                *last_act_id = act_id;

        return 0;
}
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
                                struct nfp_flower_merge_check *merge,
                                bool extra_fields)
{
        struct nfp_flower_meta_tci *meta_tci;
        u8 *mask = flow->mask_data;
        u8 key_layer, match_size;

        memset(merge, 0, sizeof(struct nfp_flower_merge_check));

        meta_tci = (struct nfp_flower_meta_tci *)mask;
        key_layer = meta_tci->nfp_flow_key_layer;

        if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
                return -EOPNOTSUPP;

        merge->tci = meta_tci->tci;
        mask += sizeof(struct nfp_flower_meta_tci);

        if (key_layer & NFP_FLOWER_LAYER_EXT_META)
                mask += sizeof(struct nfp_flower_ext_meta);

        mask += sizeof(struct nfp_flower_in_port);

        if (key_layer & NFP_FLOWER_LAYER_MAC) {
                match_size = sizeof(struct nfp_flower_mac_mpls);
                memcpy(&merge->l2, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_TP) {
                match_size = sizeof(struct nfp_flower_tp_ports);
                memcpy(&merge->l4, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV4) {
                match_size = sizeof(struct nfp_flower_ipv4);
                memcpy(&merge->ipv4, mask, match_size);
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV6) {
                match_size = sizeof(struct nfp_flower_ipv6);
                memcpy(&merge->ipv6, mask, match_size);
        }

        return 0;
}
static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
                     struct nfp_fl_payload *sub_flow2)
{
        /* Two flows can be merged if sub_flow2 only matches on bits that are
         * either matched by sub_flow1 or set by a sub_flow1 action. This
         * ensures that every packet that hits sub_flow1 and recirculates is
         * guaranteed to hit sub_flow2.
         */
        struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
        int err, act_out = 0;
        u8 last_act_id = 0;

        err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
                                              true);
        if (err)
                return err;

        err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
                                              false);
        if (err)
                return err;

        err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
                                                   &last_act_id, &act_out);
        if (err)
                return err;

        /* Must only be 1 output action and it must be the last in sequence. */
        if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                return -EOPNOTSUPP;

        /* Reject merge if sub_flow2 matches on something that is not matched
         * on or set in an action by sub_flow1.
         */
        err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
                            sub_flow1_merge.vals,
                            sizeof(struct nfp_flower_merge_check) * 8);
        if (err)
                return -EINVAL;

        return 0;
}
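
/* Example of the check above: if sub_flow2 masks the IPv4 destination
 * address, the merge is only allowed when sub_flow1 either masks that same
 * field or rewrites it via NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS (which sets
 * the corresponding bits in sub_flow1_merge). bitmap_andnot() then returns
 * 0 because no bit of sub_flow2_merge is left uncovered.
 */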
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
                            bool *tunnel_act)
{
        unsigned int act_off = 0, act_len;
        struct nfp_fl_act_head *a;
        u8 act_id = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&act_src[act_off];
                act_len = a->len_lw << NFP_FL_LW_SIZ;
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        if (tunnel_act)
                                *tunnel_act = true;
                        fallthrough;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                        memcpy(act_dst + act_off, act_src + act_off, act_len);
                        break;
                default:
                        return act_off;
                }

                act_off += act_len;
        }

        return act_off;
}
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
                        *vlan = (struct nfp_fl_push_vlan *)a;
                else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                        return -EOPNOTSUPP;

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Ensure any VLAN push also has an egress action. */
        if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
                return -EOPNOTSUPP;

        return 0;
}
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
        struct nfp_fl_set_tun *tun;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
                        tun = (struct nfp_fl_set_tun *)a;
                        tun->outer_vlan_tpid = vlan->vlan_tpid;
                        tun->outer_vlan_tci = vlan->vlan_tci;

                        return 0;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Return error if no tunnel action is found. */
        return -EOPNOTSUPP;
}
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
                        struct nfp_fl_payload *sub_flow2,
                        struct nfp_fl_payload *merge_flow)
{
        unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
        struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
        bool tunnel_act = false;
        char *merge_act;
        int err;

        /* The last action of sub_flow1 must be output - do not merge this. */
        sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
        sub2_act_len = sub_flow2->meta.act_len;

        if (!sub2_act_len)
                return -EINVAL;

        if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
                return -EINVAL;

        /* A shortcut can only be applied if there is a single action. */
        if (sub1_act_len)
                merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
        else
                merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

        merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
        merge_act = merge_flow->action_data;

        /* Copy any pre-actions to the start of merge flow action list. */
        pre_off1 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow1->action_data,
                                               sub1_act_len, &tunnel_act);
        merge_act += pre_off1;
        sub1_act_len -= pre_off1;
        pre_off2 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow2->action_data,
                                               sub2_act_len, NULL);
        merge_act += pre_off2;
        sub2_act_len -= pre_off2;

        /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
         * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
         * valid merge.
         */
        if (tunnel_act) {
                char *post_tun_acts = &sub_flow2->action_data[pre_off2];

                err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
                                                  &post_tun_push_vlan);
                if (err)
                        return err;

                if (post_tun_push_vlan) {
                        pre_off2 += sizeof(*post_tun_push_vlan);
                        sub2_act_len -= sizeof(*post_tun_push_vlan);
                }
        }

        /* Copy remaining actions from sub_flows 1 and 2. */
        memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

        if (post_tun_push_vlan) {
                /* Update tunnel action in merge to include VLAN push. */
                err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
                                                 post_tun_push_vlan);
                if (err)
                        return err;

                merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
        }

        merge_act += sub1_act_len;
        memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

        return 0;
}
/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
        list_del(&link->merge_flow.list);
        list_del(&link->sub_flow.list);
        kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
                                    struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
                if (link->sub_flow.flow == sub_flow) {
                        nfp_flower_unlink_flow(link);
                        return;
                }
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
                                 struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        link = kmalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        link->merge_flow.flow = merge_flow;
        list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
        link->sub_flow.flow = sub_flow;
        list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

        return 0;
}
/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 in success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
                                     struct nfp_fl_payload *sub_flow1,
                                     struct nfp_fl_payload *sub_flow2)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
        struct nfp_merge_info *merge_info;
        u64 parent_ctx = 0;
        int err;

        if (sub_flow1 == sub_flow2 ||
            nfp_flower_is_merge_flow(sub_flow1) ||
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;

        /* Check if the two flows are already merged */
        parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
        parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
        if (rhashtable_lookup_fast(&priv->merge_table,
                                   &parent_ctx, merge_table_params)) {
                nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
                return 0;
        }

        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;

        merge_key_ls.key_size = sub_flow1->meta.key_len;

        merge_flow = nfp_flower_allocate_new(&merge_key_ls);
        if (!merge_flow)
                return -ENOMEM;

        merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
        merge_flow->ingress_dev = sub_flow1->ingress_dev;

        memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
               sub_flow1->meta.key_len);
        memcpy(merge_flow->mask_data, sub_flow1->mask_data,
               sub_flow1->meta.mask_len);

        err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow1);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow2);
        if (err)
                goto err_unlink_sub_flow1;

        err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
                                        merge_flow->ingress_dev, NULL);
        if (err)
                goto err_unlink_sub_flow2;

        err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
                                     nfp_flower_table_params);
        if (err)
                goto err_release_metadata;

        merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
        if (!merge_info) {
                err = -ENOMEM;
                goto err_remove_rhash;
        }
        merge_info->parent_ctx = parent_ctx;
        err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
                                     merge_table_params);
        if (err)
                goto err_destroy_merge_info;

        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
                goto err_remove_merge_info;

        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;

        return 0;

err_remove_merge_info:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
                                            &merge_info->ht_node,
                                            merge_table_params));
err_destroy_merge_info:
        kfree(merge_info);
err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
        nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
        nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        kfree(merge_flow);
        return err;
}
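
/* Example of the parent_ctx key used above: sub-flows with host context ids
 * 0x11 and 0x22 are tracked in merge_table under the 64-bit key
 * ((u64)0x11 << 32) | 0x22 == 0x0000001100000022, so a repeated merge hint
 * for the same pair is caught by the rhashtable lookup and skipped.
 */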
/**
 * nfp_flower_validate_pre_tun_rule()
 * @app:	Pointer to the APP handle
 * @flow:	Pointer to NFP flow representation of rule
 * @key_ls:	Pointer to NFP key layers structure
 * @extack:	Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                                 struct nfp_fl_payload *flow,
                                 struct nfp_fl_key_ls *key_ls,
                                 struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_meta_tci *meta_tci;
        struct nfp_flower_mac_mpls *mac;
        u8 *ext = flow->unmasked_data;
        struct nfp_fl_act_head *act;
        u8 *mask = flow->mask_data;
        bool vlan = false;
        int act_offset = 0;
        u8 key_layer;

        meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
        key_layer = key_ls->key_layer;
        if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
                if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
                        u16 vlan_tci = be16_to_cpu(meta_tci->tci);

                        vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
                        vlan = true;
                } else {
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
                }
        }

        if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
                return -EOPNOTSUPP;
        } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
                return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
                return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
            !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
                return -EOPNOTSUPP;
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV6)
                flow->pre_tun_rule.is_ipv6 = true;
        else
                flow->pre_tun_rule.is_ipv6 = false;

        /* Skip fields known to exist. */
        mask += sizeof(struct nfp_flower_meta_tci);
        ext += sizeof(struct nfp_flower_meta_tci);
        if (key_ls->key_layer_two) {
                mask += sizeof(struct nfp_flower_ext_meta);
                ext += sizeof(struct nfp_flower_ext_meta);
        }
        mask += sizeof(struct nfp_flower_in_port);
        ext += sizeof(struct nfp_flower_in_port);

        /* Ensure destination MAC address is fully matched. */
        mac = (struct nfp_flower_mac_mpls *)mask;
        if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
                return -EOPNOTSUPP;
        }

        /* Ensure source MAC address is fully matched. This is only needed
         * for firmware with the DECAP_V2 feature enabled. Don't do this
         * for firmware without this feature to keep old behaviour.
         */
        if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
                mac = (struct nfp_flower_mac_mpls *)mask;
                if (!is_broadcast_ether_addr(&mac->mac_src[0])) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported pre-tunnel rule: source MAC field must not be masked");
                        return -EOPNOTSUPP;
                }
        }

        if (mac->mpls_lse) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
                return -EOPNOTSUPP;
        }

        /* Ensure destination MAC address matches pre_tun_dev. */
        mac = (struct nfp_flower_mac_mpls *)ext;
        if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
                return -EOPNOTSUPP;
        }

        /* Save mac addresses in pre_tun_rule entry for later use */
        memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN);
        memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN);

        mask += sizeof(struct nfp_flower_mac_mpls);
        ext += sizeof(struct nfp_flower_mac_mpls);
        if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
            key_layer & NFP_FLOWER_LAYER_IPV6) {
                /* Flags and proto fields have same offset in IPv4 and IPv6. */
                int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
                int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
                int size;
                int i;

                size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
                        sizeof(struct nfp_flower_ipv4) :
                        sizeof(struct nfp_flower_ipv6);

                /* Ensure proto and flags are the only IP layer fields. */
                for (i = 0; i < size; i++)
                        if (mask[i] && i != ip_flags && i != ip_proto) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
                                return -EOPNOTSUPP;
                        }
                ext += size;
                mask += size;
        }

        if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
                        struct nfp_flower_vlan *vlan_tags;
                        u16 vlan_tpid;
                        u16 vlan_tci;

                        vlan_tags = (struct nfp_flower_vlan *)ext;

                        vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
                        vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid);

                        vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
                        flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid);
                        vlan = true;
                } else {
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
                        flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff);
                }
        }

        /* Action must be a single egress or pop_vlan and egress. */
        act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        if (vlan) {
                if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
                        return -EOPNOTSUPP;
                }

                act_offset += act->len_lw << NFP_FL_LW_SIZ;
                act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        }

        if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
                return -EOPNOTSUPP;
        }

        act_offset += act->len_lw << NFP_FL_LW_SIZ;

        /* Ensure there are no more actions after egress. */
        if (act_offset != flow->meta.act_len) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
                return -EOPNOTSUPP;
        }

        return 0;
}
static bool offload_pre_check(struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_ct ct;

        if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
                flow_rule_match_ct(rule, &ct);
                /* Allow special case where CT match is all 0 */
                if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
                        return false;
        }

        if (flow->common.chain_index)
                return false;

        return true;
}
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                       struct flow_cls_offload *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *flow_pay;
        struct nfp_fl_key_ls *key_layer;
        struct nfp_port *port = NULL;
        int err;

        extack = flow->common.extack;
        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_port_from_netdev(netdev);

        if (is_pre_ct_flow(flow))
                return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack, NULL);

        if (is_post_ct_flow(flow))
                return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);

        if (!offload_pre_check(flow))
                return -EOPNOTSUPP;

        key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
        if (!key_layer)
                return -ENOMEM;

        err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
                                              &tun_type, extack);
        if (err)
                goto err_free_key_ls;

        flow_pay = nfp_flower_allocate_new(key_layer);
        if (!flow_pay) {
                err = -ENOMEM;
                goto err_free_key_ls;
        }

        err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
                                            flow_pay, tun_type, extack);
        if (err)
                goto err_destroy_flow;

        err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
        if (err)
                goto err_destroy_flow;

        if (flow_pay->pre_tun_rule.dev) {
                err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
                if (err)
                        goto err_destroy_flow;
        }

        err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
        if (err)
                goto err_destroy_flow;

        flow_pay->tc_flower_cookie = flow->cookie;
        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                     nfp_flower_table_params);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
                goto err_release_metadata;
        }

        if (flow_pay->pre_tun_rule.dev) {
                if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
                        struct nfp_predt_entry *predt;

                        predt = kzalloc(sizeof(*predt), GFP_KERNEL);
                        if (!predt) {
                                err = -ENOMEM;
                                goto err_remove_rhash;
                        }
                        predt->flow_pay = flow_pay;
                        INIT_LIST_HEAD(&predt->nn_list);
                        spin_lock_bh(&priv->predt_lock);
                        list_add(&predt->list_head, &priv->predt_list);
                        flow_pay->pre_tun_rule.predt = predt;
                        nfp_tun_link_and_update_nn_entries(app, predt);
                        spin_unlock_bh(&priv->predt_lock);
                } else {
                        err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
                }
        } else {
                err = nfp_flower_xmit_flow(app, flow_pay,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
        }

        if (err)
                goto err_remove_rhash;

        if (port)
                port->tc_offload_cnt++;

        flow_pay->in_hw = true;

        /* Deallocate flow payload when flower rule has been destroyed. */
        kfree(key_layer);

        return 0;

err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &flow_pay->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
        if (flow_pay->nfp_tun_ipv6)
                nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
        kfree(flow_pay->unmasked_data);
        kfree(flow_pay);
err_free_key_ls:
        kfree(key_layer);
        return err;
}
static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
                             struct nfp_fl_payload *del_sub_flow,
                             struct nfp_fl_payload *merge_flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link, *temp;
        struct nfp_merge_info *merge_info;
        struct nfp_fl_payload *origin;
        u64 parent_ctx = 0;
        bool mod = false;
        int err;

        link = list_first_entry(&merge_flow->linked_flows,
                                struct nfp_fl_payload_link, merge_flow.list);
        origin = link->sub_flow.flow;

        /* Re-add rule the merge had overwritten if it has not been deleted. */
        if (origin != del_sub_flow)
                mod = true;

        err = nfp_modify_flow_metadata(app, merge_flow);
        if (err) {
                nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
                goto err_free_links;
        }

        if (!mod) {
                err = nfp_flower_xmit_flow(app, merge_flow,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
                if (err) {
                        nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
                        goto err_free_links;
                }
        } else {
                __nfp_modify_flow_metadata(priv, origin);
                err = nfp_flower_xmit_flow(app, origin,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
                origin->in_hw = true;
        }

err_free_links:
        /* Clean any links connected with the merged flow. */
        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
                                 merge_flow.list) {
                u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);

                parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
                nfp_flower_unlink_flow(link);
        }

        merge_info = rhashtable_lookup_fast(&priv->merge_table,
                                            &parent_ctx,
                                            merge_table_params);
        if (merge_info) {
                WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
                                                    &merge_info->ht_node,
                                                    merge_table_params));
                kfree(merge_info);
        }

        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
        kfree_rcu(merge_flow, rcu);
}
static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
                                  struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link, *temp;

        /* Remove any merge flow formed from the deleted sub_flow. */
        list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
                                 sub_flow.list)
                nfp_flower_remove_merge_flow(app, sub_flow,
                                             link->merge_flow.flow);
}
/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
                       struct flow_cls_offload *flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_ct_map_entry *ct_map_ent;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *nfp_flow;
        struct nfp_port *port = NULL;
        int err;

        extack = flow->common.extack;
        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_port_from_netdev(netdev);

        /* Check ct_map_table */
        ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
                                            nfp_ct_map_params);
        if (ct_map_ent) {
                err = nfp_fl_ct_del_flow(ct_map_ent);
                return err;
        }

        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
                return -ENOENT;
        }

        err = nfp_modify_flow_metadata(app, nfp_flow);
        if (err)
                goto err_free_merge_flow;

        if (nfp_flow->nfp_tun_ipv4_addr)
                nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

        if (nfp_flow->nfp_tun_ipv6)
                nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);

        if (!nfp_flow->in_hw) {
                err = 0;
                goto err_free_merge_flow;
        }

        if (nfp_flow->pre_tun_rule.dev) {
                if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
                        struct nfp_predt_entry *predt;

                        predt = nfp_flow->pre_tun_rule.predt;
                        if (predt) {
                                spin_lock_bh(&priv->predt_lock);
                                nfp_tun_unlink_and_update_nn_entries(app, predt);
                                list_del(&predt->list_head);
                                spin_unlock_bh(&priv->predt_lock);
                                kfree(predt);
                        }
                } else {
                        err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
                }
        } else {
                err = nfp_flower_xmit_flow(app, nfp_flow,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
        }
        /* Fall through on error. */

err_free_merge_flow:
        nfp_flower_del_linked_merge_flows(app, nfp_flow);
        if (port)
                port->tc_offload_cnt--;
        kfree(nfp_flow->action_data);
        kfree(nfp_flow->mask_data);
        kfree(nfp_flow->unmasked_data);
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &nfp_flow->fl_node,
                                            nfp_flower_table_params));
        kfree_rcu(nfp_flow, rcu);
        return err;
}
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
                                struct nfp_fl_payload *merge_flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link;
        struct nfp_fl_payload *sub_flow;
        u64 pkts, bytes, used;
        u32 ctx_id;

        ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
        pkts = priv->stats[ctx_id].pkts;
        /* Do not cycle subflows if no stats to distribute. */
        if (!pkts)
                return;
        bytes = priv->stats[ctx_id].bytes;
        used = priv->stats[ctx_id].used;

        /* Reset stats for the merge flow. */
        priv->stats[ctx_id].pkts = 0;
        priv->stats[ctx_id].bytes = 0;

        /* The merge flow has received stats updates from firmware.
         * Distribute these stats to all subflows that form the merge.
         * The stats will be collected from TC via the subflows.
         */
        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
                sub_flow = link->sub_flow.flow;
                ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
                priv->stats[ctx_id].pkts += pkts;
                priv->stats[ctx_id].bytes += bytes;
                priv->stats[ctx_id].used = max_t(u64, used,
                                                 priv->stats[ctx_id].used);
        }
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
                              struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        /* Get merge flows that the subflow forms to distribute their stats. */
        list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
                __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}
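
/* Example of the distribution above: if a merge flow accumulated 100 packets
 * and 6400 bytes from firmware, each linked sub-flow's counters are
 * incremented by those amounts and the merge flow's own counters are reset,
 * so a subsequent TC stats dump of either sub-flow reports the merged
 * traffic exactly once.
 */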
/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
                     struct flow_cls_offload *flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_ct_map_entry *ct_map_ent;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *nfp_flow;
        u32 ctx_id;

        /* Check ct_map table first */
        ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
                                            nfp_ct_map_params);
        if (ct_map_ent)
                return nfp_fl_ct_stats(flow, ct_map_ent);

        extack = flow->common.extack;
        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
                return -EINVAL;
        }

        ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

        spin_lock_bh(&priv->stats_lock);
        /* If request is for a sub_flow, update stats from merged flows. */
        if (!list_empty(&nfp_flow->linked_flows))
                nfp_flower_update_merge_stats(app, nfp_flow);

        flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
                          priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
                          FLOW_ACTION_HW_STATS_DELAYED);

        priv->stats[ctx_id].pkts = 0;
        priv->stats[ctx_id].bytes = 0;
        spin_unlock_bh(&priv->stats_lock);

        return 0;
}
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
                        struct flow_cls_offload *flower)
{
        struct nfp_flower_priv *priv = app->priv;
        int ret;

        if (!eth_proto_is_802_3(flower->common.protocol))
                return -EOPNOTSUPP;

        mutex_lock(&priv->nfp_fl_lock);
        switch (flower->command) {
        case FLOW_CLS_REPLACE:
                ret = nfp_flower_add_offload(app, netdev, flower);
                break;
        case FLOW_CLS_DESTROY:
                ret = nfp_flower_del_offload(app, netdev, flower);
                break;
        case FLOW_CLS_STATS:
                ret = nfp_flower_get_stats(app, netdev, flower);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }
        mutex_unlock(&priv->nfp_fl_lock);

        return ret;
}
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
                                        void *type_data, void *cb_priv)
{
        struct flow_cls_common_offload *common = type_data;
        struct nfp_repr *repr = cb_priv;

        if (!tc_can_offload_extack(repr->netdev, common->extack))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(repr->app, repr->netdev,
                                               type_data);
        case TC_SETUP_CLSMATCHALL:
                return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
                                                    type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static LIST_HEAD(nfp_block_cb_list);
static int nfp_flower_setup_tc_block(struct net_device *netdev,
                                     struct flow_block_offload *f)
{
        struct nfp_repr *repr = netdev_priv(netdev);
        struct nfp_flower_repr_priv *repr_priv;
        struct flow_block_cb *block_cb;

        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        repr_priv = repr->app_priv;
        repr_priv->block_shared = f->block_shared;
        f->driver_block_list = &nfp_block_cb_list;
        f->unlocked_driver_cb = true;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
                                          &nfp_block_cb_list))
                        return -EBUSY;

                block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
                                               repr, repr, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f->block,
                                                nfp_flower_setup_tc_block_cb,
                                                repr);
                if (!block_cb)
                        return -ENOENT;

                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
                        enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_tc_block(netdev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}
struct nfp_flower_indr_block_cb_priv {
        struct net_device *netdev;
        struct nfp_app *app;
        struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
                                     struct net_device *netdev)
{
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;

        list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}
static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
                                          void *type_data, void *cb_priv)
{
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(priv->app, priv->netdev,
                                               type_data);
        default:
                return -EOPNOTSUPP;
        }
}

void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

        list_del(&priv->list);
        kfree(priv);
}
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
                               struct flow_block_offload *f, void *data,
                               void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
        struct flow_block_cb *block_cb;

        if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
             !nfp_flower_internal_port_can_offload(app, netdev)) ||
            (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
             nfp_flower_internal_port_can_offload(app, netdev)))
                return -EOPNOTSUPP;

        f->unlocked_driver_cb = true;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
                if (cb_priv &&
                    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
                                          cb_priv,
                                          &nfp_block_cb_list))
                        return -EBUSY;

                cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
                if (!cb_priv)
                        return -ENOMEM;

                cb_priv->netdev = netdev;
                cb_priv->app = app;
                list_add(&cb_priv->list, &priv->indr_block_cb_priv);

                block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
                                                    cb_priv, cb_priv,
                                                    nfp_flower_setup_indr_tc_release,
                                                    f, netdev, sch, data, app, cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
                        return PTR_ERR(block_cb);
                }

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
                if (!cb_priv)
                        return -ENOENT;

                block_cb = flow_block_cb_lookup(f->block,
                                                nfp_flower_setup_indr_block_cb,
                                                cb_priv);
                if (!block_cb)
                        return -ENOENT;

                flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
static int
nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
{
        if (!data)
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_ACT:
                return nfp_setup_tc_act_offload(app, data);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
                            enum tc_setup_type type, void *type_data,
                            void *data,
                            void (*cleanup)(struct flow_block_cb *block_cb))
{
        if (!netdev)
                return nfp_setup_tc_no_dev(cb_priv, type, data);

        if (!nfp_fl_is_netdev_to_offload(netdev))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
                                                      type_data, data, cleanup);