// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"

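/* Fill the metadata/TCI key and mask. When the firmware supports QinQ
 * (qinq_sup), single-tag VLAN matching is carried in the dedicated VLAN
 * key instead, so the TCI field is left as zero here.
 */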
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
	u16 tmp_tci;

	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;

	if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.key->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.key->vlan_id);
		ext->tci = cpu_to_be16(tmp_tci);

		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				      match.mask->vlan_priority) |
			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				      match.mask->vlan_id);
		msk->tci = cpu_to_be16(tmp_tci);
	}
}

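/* Extended metadata simply records the second key-layer bitmap. */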
static void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}

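/* Encode the ingress port. The mask version always matches the full
 * field; for tunnel flows the port carries the tunnel type rather than
 * a port ID, and a zero port ID on the exact key is rejected.
 */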
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
			bool mask_version, enum nfp_flower_tun_type tun_type,
			struct netlink_ext_ack *extack)
{
	if (mask_version) {
		frame->in_port = cpu_to_be32(~0);
		return 0;
	}

	if (tun_type) {
		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else {
		if (!cmsg_port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
			return -EOPNOTSUPP;
		}
		frame->in_port = cpu_to_be32(cmsg_port);
	}

	return 0;
}

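/* Fill MAC addresses and the first MPLS label stack entry. Matching on
 * more than one LSE is rejected; a bare MPLS ethertype only sets the
 * MPLS_Q presence bit.
 */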
static int
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
		       struct netlink_ext_ack *extack)
{
	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
		ether_addr_copy(ext->mac_src, &match.key->src[0]);
		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 t_mpls;

		flow_rule_match_mpls(rule, &match);

		/* Only support matching the first LSE */
		if (match.mask->used_lses != 1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: invalid LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				    match.key->ls[0].mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				    match.key->ls[0].mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				    match.key->ls[0].mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		ext->mpls_lse = cpu_to_be32(t_mpls);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
				    match.mask->ls[0].mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
				    match.mask->ls[0].mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
				    match.mask->ls[0].mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		msk->mpls_lse = cpu_to_be32(t_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * MPLS fields.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}

	return 0;
}

static void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
			 struct nfp_flower_tp_ports *msk,
			 struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src = match.key->src;
		ext->port_dst = match.key->dst;
		msk->port_src = match.mask->src;
		msk->port_dst = match.mask->dst;
	}
}

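/* Shared L3/L4 extras: IP protocol, TOS/TTL, individual TCP flags and
 * fragmentation control flags. Used by both the IPv4 and IPv6 keys.
 */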
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
			  struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ext->proto = match.key->ip_proto;
		msk->proto = match.mask->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		u16 tcp_flags, tcp_flags_mask;
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		tcp_flags = be16_to_cpu(match.key->flags);
		tcp_flags_mask = be16_to_cpu(match.mask->flags);

		if (tcp_flags & TCPHDR_FIN)
			ext->flags |= NFP_FL_TCP_FLAG_FIN;
		if (tcp_flags_mask & TCPHDR_FIN)
			msk->flags |= NFP_FL_TCP_FLAG_FIN;

		if (tcp_flags & TCPHDR_SYN)
			ext->flags |= NFP_FL_TCP_FLAG_SYN;
		if (tcp_flags_mask & TCPHDR_SYN)
			msk->flags |= NFP_FL_TCP_FLAG_SYN;

		if (tcp_flags & TCPHDR_RST)
			ext->flags |= NFP_FL_TCP_FLAG_RST;
		if (tcp_flags_mask & TCPHDR_RST)
			msk->flags |= NFP_FL_TCP_FLAG_RST;

		if (tcp_flags & TCPHDR_PSH)
			ext->flags |= NFP_FL_TCP_FLAG_PSH;
		if (tcp_flags_mask & TCPHDR_PSH)
			msk->flags |= NFP_FL_TCP_FLAG_PSH;

		if (tcp_flags & TCPHDR_URG)
			ext->flags |= NFP_FL_TCP_FLAG_URG;
		if (tcp_flags_mask & TCPHDR_URG)
			msk->flags |= NFP_FL_TCP_FLAG_URG;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
			ext->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
			msk->flags |= NFP_FL_IP_FRAGMENTED;
		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
			ext->flags |= NFP_FL_IP_FRAG_FIRST;
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			msk->flags |= NFP_FL_IP_FRAG_FIRST;
	}
}

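/* Build one TCI/TPID pair for the QinQ key; outer_vlan selects whether
 * the outer or the inner slot of the frame is written.
 */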
static void
nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
		     struct nfp_flower_vlan *frame,
		     bool outer_vlan)
{
	u16 tci;

	tci = NFP_FLOWER_MASK_VLAN_PRESENT;
	tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
			  key->vlan_priority) |
	       FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
			  key->vlan_id);

	if (outer_vlan) {
		frame->outer_tci = cpu_to_be16(tci);
		frame->outer_tpid = key->vlan_tpid;
	} else {
		frame->inner_tci = cpu_to_be16(tci);
		frame->inner_tpid = key->vlan_tpid;
	}
}

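/* QinQ key: the outer VLAN comes from the VLAN dissector key, the inner
 * VLAN from the CVLAN key.
 */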
static void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
			struct nfp_flower_vlan *msk,
			struct flow_rule *rule)
{
	struct flow_match_vlan match;

	memset(ext, 0, sizeof(struct nfp_flower_vlan));
	memset(msk, 0, sizeof(struct nfp_flower_vlan));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		flow_rule_match_vlan(rule, &match);
		nfp_flower_fill_vlan(match.key, ext, true);
		nfp_flower_fill_vlan(match.mask, msk, true);
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		flow_rule_match_cvlan(rule, &match);
		nfp_flower_fill_vlan(match.key, ext, false);
		nfp_flower_fill_vlan(match.mask, msk, false);
	}
}

static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
			struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
	struct flow_match_ipv4_addrs match;

	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &match);
		ext->ipv4_src = match.key->src;
		ext->ipv4_dst = match.key->dst;
		msk->ipv4_src = match.mask->src;
		msk->ipv4_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

static void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
			struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		ext->ipv6_src = match.key->src;
		ext->ipv6_dst = match.key->dst;
		msk->ipv6_src = match.mask->src;
		msk->ipv6_dst = match.mask->dst;
	}

	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}

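/* Geneve TLV options are copied verbatim into the key and mask. */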
static int
nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
{
	struct flow_match_enc_opts match;

	flow_rule_match_enc_opts(rule, &match);
	memcpy(ext, match.key->data, match.key->len);
	memcpy(msk, match.mask->data, match.mask->len);

	return 0;
}

static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
				  struct nfp_flower_tun_ipv4 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		ext->src = match.key->src;
		ext->dst = match.key->dst;
		msk->src = match.mask->src;
		msk->dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
				  struct nfp_flower_tun_ipv6 *msk,
				  struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		ext->src = match.key->src;
		ext->dst = match.key->dst;
		msk->src = match.mask->src;
		msk->dst = match.mask->dst;
	}
}

static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
			      struct nfp_flower_tun_ip_ext *msk,
			      struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ext->tos = match.key->tos;
		ext->ttl = match.key->ttl;
		msk->tos = match.mask->tos;
		msk->ttl = match.mask->ttl;
	}
}

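/* Tunnel key ID: VXLAN/Geneve carry a VNI, stored shifted up by
 * NFP_FL_TUN_VNI_OFFSET within the 32-bit tunnel ID; GRE uses the raw
 * 32-bit key and additionally sets the key-present flag.
 */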
static void
nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
			       struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;
		u32 vni;

		flow_rule_match_enc_keyid(rule, &match);
		vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
		*key = cpu_to_be32(vni);
		vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
		*key_msk = cpu_to_be32(vni);
	}
}

static void
nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
			       __be16 *flags_msk, struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		*key = match.key->keyid;
		*key_msk = match.mask->keyid;

		*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
		*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
	}
}

static void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
				struct nfp_flower_ipv4_gre_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));

	/* NVGRE is the only supported GRE tunnel type */
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}

static void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
				struct nfp_flower_ipv4_udp_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));

	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

static void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
				struct nfp_flower_ipv6_udp_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));

	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}

static void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
				struct nfp_flower_ipv6_gre_tun *msk,
				struct flow_rule *rule)
{
	memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
	memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));

	/* NVGRE is the only supported GRE tunnel type */
	ext->ethertype = cpu_to_be16(ETH_P_TEB);
	msk->ethertype = cpu_to_be16(~0);

	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
				       &ext->tun_flags, &msk->tun_flags, rule);
}

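/* Top-level entry point: walk the key layers selected in key_ls and
 * append each compiled structure to the unmasked and mask buffers in
 * firmware order, recording tunnel endpoint addresses as they are found.
 */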
int nfp_flower_compile_flow_match(struct nfp_app *app,
				  struct flow_cls_offload *flow,
				  struct nfp_fl_key_ls *key_ls,
				  struct net_device *netdev,
				  struct nfp_fl_payload *nfp_flow,
				  enum nfp_flower_tun_type tun_type,
				  struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct nfp_flower_priv *priv = app->priv;
	bool qinq_sup;
	u32 port_id;
	int ext_len;
	int err;
	u8 *ext;
	u8 *msk;

	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
	memset(nfp_flow->mask_data, 0, key_ls->key_size);

	ext = nfp_flow->unmasked_data;
	msk = nfp_flow->mask_data;

	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
				    (struct nfp_flower_meta_tci *)msk,
				    rule, key_ls->key_layer, qinq_sup);
	ext += sizeof(struct nfp_flower_meta_tci);
	msk += sizeof(struct nfp_flower_meta_tci);

	/* Populate Extended Metadata if Required. */
	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
					    key_ls->key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_ls->key_layer_two);
		ext += sizeof(struct nfp_flower_ext_meta);
		msk += sizeof(struct nfp_flower_ext_meta);
	}

	/* Populate Exact Port data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
				      port_id, false, tun_type, extack);
	if (err)
		return err;

	/* Populate Mask Port Data. */
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, extack);
	if (err)
		return err;

	ext += sizeof(struct nfp_flower_in_port);
	msk += sizeof(struct nfp_flower_in_port);

	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
		err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
					     (struct nfp_flower_mac_mpls *)msk,
					     rule, extack);
		if (err)
			return err;

		ext += sizeof(struct nfp_flower_mac_mpls);
		msk += sizeof(struct nfp_flower_mac_mpls);
	}

	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
					 (struct nfp_flower_tp_ports *)msk,
					 rule);
		ext += sizeof(struct nfp_flower_tp_ports);
		msk += sizeof(struct nfp_flower_tp_ports);
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
					(struct nfp_flower_ipv4 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv4);
		msk += sizeof(struct nfp_flower_ipv4);
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
					(struct nfp_flower_ipv6 *)msk,
					rule);
		ext += sizeof(struct nfp_flower_ipv6);
		msk += sizeof(struct nfp_flower_ipv6);
	}

	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_gre_tun((void *)ext,
							(void *)msk, rule);
			gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
			dst = &gre_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_gre_tun);
			msk += sizeof(struct nfp_flower_ipv6_gre_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_gre_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_gre_tun);
			msk += sizeof(struct nfp_flower_ipv4_gre_tun);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}
	}

	if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
		nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
					(struct nfp_flower_vlan *)msk,
					rule);
		ext += sizeof(struct nfp_flower_vlan);
		msk += sizeof(struct nfp_flower_vlan);
	}

	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			nfp_flower_compile_ipv6_udp_tun((void *)ext,
							(void *)msk, rule);
			udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
			dst = &udp_match->ipv6.dst;
			ext += sizeof(struct nfp_flower_ipv6_udp_tun);
			msk += sizeof(struct nfp_flower_ipv6_udp_tun);

			entry = nfp_tunnel_add_ipv6_off(app, dst);
			if (!entry)
				return -EOPNOTSUPP;

			nfp_flow->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			nfp_flower_compile_ipv4_udp_tun((void *)ext,
							(void *)msk, rule);
			dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
			ext += sizeof(struct nfp_flower_ipv4_udp_tun);
			msk += sizeof(struct nfp_flower_ipv4_udp_tun);

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			nfp_flow->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(app, dst);
		}

		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			err = nfp_flower_compile_geneve_opt(ext, msk, rule);
			if (err)
				return err;
		}
	}

	/* Check that the flow key does not exceed the maximum limit.
	 * All structures in the key are multiples of 4 bytes, so use u32.
	 */
	ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
	if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: flow key too long");
		return -EOPNOTSUPP;
	}

	return 0;
}