// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
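
/* Each compile helper below translates one part of a flow_rule into the
 * firmware match layout, OR-ing the rule's key bits (pre-masked with the
 * rule mask) into @ext and the corresponding mask bits into @msk.
 */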
void
nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext,
                        struct nfp_flower_meta_tci *msk, u8 key_type)
{
        /* Populate the metadata frame. */
        ext->nfp_flow_key_layer = key_type;
        ext->mask_id = ~0;

        msk->nfp_flow_key_layer = key_type;
        msk->mask_id = ~0;
}
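
/* Compile the VLAN TCI portion of the meta/TCI key from the rule's
 * outermost VLAN match, if one is present.
 */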
void
nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext,
                       struct nfp_flower_meta_tci *msk,
                       struct flow_rule *rule)
{
        u16 msk_tci, key_tci;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);
                /* Populate the tci field. */
                key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
                key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                      match.key->vlan_priority) |
                           FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                      match.key->vlan_id);

                msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
                msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                      match.mask->vlan_priority) |
                           FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                                      match.mask->vlan_id);

                ext->tci |= cpu_to_be16((key_tci & msk_tci));
                msk->tci |= cpu_to_be16(msk_tci);
        }
}
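
/* Zero and build the combined meta/TCI header. When the firmware supports
 * QinQ, VLAN data is carried in the dedicated VLAN key instead of the TCI
 * field here.
 */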
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
                            struct nfp_flower_meta_tci *msk,
                            struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
        memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
        memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

        nfp_flower_compile_meta(ext, msk, key_type);

        if (!qinq_sup)
                nfp_flower_compile_tci(ext, msk, rule);
}
void
nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
{
        frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
}
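
/* Encode the ingress port. The mask half of the key is always an exact
 * match, and tunnel flows carry the tunnel type rather than a port id.
 */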
int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
                        bool mask_version, enum nfp_flower_tun_type tun_type,
                        struct netlink_ext_ack *extack)
{
        if (mask_version) {
                frame->in_port = cpu_to_be32(~0);
                return 0;
        }

        if (tun_type) {
                frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else {
                if (!cmsg_port) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
                        return -EOPNOTSUPP;
                }
                frame->in_port = cpu_to_be32(cmsg_port);
        }

        return 0;
}
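
/* Compile Ethernet source and destination addresses, byte by byte, only
 * adding key bits that are not already fixed by the accumulated mask.
 */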
void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
                       struct nfp_flower_mac_mpls *msk,
                       struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;
                u8 tmp;
                int i;

                flow_rule_match_eth_addrs(rule, &match);
                /* Populate mac frame. */
                for (i = 0; i < ETH_ALEN; i++) {
                        tmp = match.key->dst[i] & match.mask->dst[i];
                        ext->mac_dst[i] |= tmp & (~msk->mac_dst[i]);
                        msk->mac_dst[i] |= match.mask->dst[i];

                        tmp = match.key->src[i] & match.mask->src[i];
                        ext->mac_src[i] |= tmp & (~msk->mac_src[i]);
                        msk->mac_src[i] |= match.mask->src[i];
                }
        }
}
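
/* Compile the MPLS label stack entry. Only a single LSE can be offloaded;
 * a bare MPLS ethertype sets just the NFP_FLOWER_MASK_MPLS_Q presence bit.
 */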
int
nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext,
                        struct nfp_flower_mac_mpls *msk,
                        struct flow_rule *rule,
                        struct netlink_ext_ack *extack)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                struct flow_match_mpls match;
                u32 key_mpls, msk_mpls;

                flow_rule_match_mpls(rule, &match);

                /* Only support matching the first LSE */
                if (match.mask->used_lses != 1) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported offload: invalid LSE depth for MPLS match offload");
                        return -EOPNOTSUPP;
                }

                key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
                                      match.key->ls[0].mpls_label) |
                           FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
                                      match.key->ls[0].mpls_tc) |
                           FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
                                      match.key->ls[0].mpls_bos) |
                           NFP_FLOWER_MASK_MPLS_Q;

                msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
                                      match.mask->ls[0].mpls_label) |
                           FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
                                      match.mask->ls[0].mpls_tc) |
                           FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
                                      match.mask->ls[0].mpls_bos) |
                           NFP_FLOWER_MASK_MPLS_Q;

                ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls));
                msk->mpls_lse |= cpu_to_be32(msk_mpls);
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
                 * bit, which indicates an mpls ether type but without any
                 * mpls fields.
                 */
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
                    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
                        ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
                        msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
                }
        }

        return 0;
}
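
/* Zero the MAC/MPLS key and mask, then compile both parts. */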
static int
nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext,
                            struct nfp_flower_mac_mpls *msk,
                            struct flow_rule *rule,
                            struct netlink_ext_ack *extack)
{
        memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
        memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

        nfp_flower_compile_mac(ext, msk, rule);

        return nfp_flower_compile_mpls(ext, msk, rule, extack);
}
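
/* Compile transport layer source and destination ports. */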
void
nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
                         struct nfp_flower_tp_ports *msk,
                         struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;
                __be16 tmp;

                flow_rule_match_ports(rule, &match);

                tmp = match.key->src & match.mask->src;
                ext->port_src |= tmp & (~msk->port_src);
                msk->port_src |= match.mask->src;

                tmp = match.key->dst & match.mask->dst;
                ext->port_dst |= tmp & (~msk->port_dst);
                msk->port_dst |= match.mask->dst;
        }
}
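
/* Compile the IP extension fields shared by the IPv4 and IPv6 keys:
 * protocol, TOS, TTL, TCP flags and fragmentation flags.
 */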
static void
nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
                          struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ext->proto |= match.key->ip_proto & match.mask->ip_proto;
                msk->proto |= match.mask->ip_proto;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;
                u8 tmp;

                flow_rule_match_ip(rule, &match);

                tmp = match.key->tos & match.mask->tos;
                ext->tos |= tmp & (~msk->tos);
                msk->tos |= match.mask->tos;

                tmp = match.key->ttl & match.mask->ttl;
                ext->ttl |= tmp & (~msk->ttl);
                msk->ttl |= match.mask->ttl;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                u16 tcp_flags, tcp_flags_mask;
                struct flow_match_tcp match;

                flow_rule_match_tcp(rule, &match);
                tcp_flags = be16_to_cpu(match.key->flags);
                tcp_flags_mask = be16_to_cpu(match.mask->flags);

                if (tcp_flags & TCPHDR_FIN)
                        ext->flags |= NFP_FL_TCP_FLAG_FIN;
                if (tcp_flags_mask & TCPHDR_FIN)
                        msk->flags |= NFP_FL_TCP_FLAG_FIN;

                if (tcp_flags & TCPHDR_SYN)
                        ext->flags |= NFP_FL_TCP_FLAG_SYN;
                if (tcp_flags_mask & TCPHDR_SYN)
                        msk->flags |= NFP_FL_TCP_FLAG_SYN;

                if (tcp_flags & TCPHDR_RST)
                        ext->flags |= NFP_FL_TCP_FLAG_RST;
                if (tcp_flags_mask & TCPHDR_RST)
                        msk->flags |= NFP_FL_TCP_FLAG_RST;

                if (tcp_flags & TCPHDR_PSH)
                        ext->flags |= NFP_FL_TCP_FLAG_PSH;
                if (tcp_flags_mask & TCPHDR_PSH)
                        msk->flags |= NFP_FL_TCP_FLAG_PSH;

                if (tcp_flags & TCPHDR_URG)
                        ext->flags |= NFP_FL_TCP_FLAG_URG;
                if (tcp_flags_mask & TCPHDR_URG)
                        msk->flags |= NFP_FL_TCP_FLAG_URG;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
                        ext->flags |= NFP_FL_IP_FRAGMENTED;
                if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
                        msk->flags |= NFP_FL_IP_FRAGMENTED;
                if (match.key->flags & FLOW_DIS_FIRST_FRAG)
                        ext->flags |= NFP_FL_IP_FRAG_FIRST;
                if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
                        msk->flags |= NFP_FL_IP_FRAG_FIRST;
        }
}
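
/* Fill one VLAN tag (outer or inner) of the QinQ VLAN key. */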
static void
nfp_flower_fill_vlan(struct flow_match_vlan *match,
                     struct nfp_flower_vlan *ext,
                     struct nfp_flower_vlan *msk, bool outer_vlan)
{
        struct flow_dissector_key_vlan *mask = match->mask;
        struct flow_dissector_key_vlan *key = match->key;
        u16 msk_tci, key_tci;

        key_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
        key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                              key->vlan_priority) |
                   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                              key->vlan_id);
        msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
        msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                              mask->vlan_priority) |
                   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
                              mask->vlan_id);

        if (outer_vlan) {
                ext->outer_tci |= cpu_to_be16((key_tci & msk_tci));
                ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid;
                msk->outer_tci |= cpu_to_be16(msk_tci);
                msk->outer_tpid |= mask->vlan_tpid;
        } else {
                ext->inner_tci |= cpu_to_be16((key_tci & msk_tci));
                ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid;
                msk->inner_tci |= cpu_to_be16(msk_tci);
                msk->inner_tpid |= mask->vlan_tpid;
        }
}
void
nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
                        struct nfp_flower_vlan *msk,
                        struct flow_rule *rule)
{
        struct flow_match_vlan match;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                flow_rule_match_vlan(rule, &match);
                nfp_flower_fill_vlan(&match, ext, msk, true);
        }
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
                flow_rule_match_cvlan(rule, &match);
                nfp_flower_fill_vlan(&match, ext, msk, false);
        }
}
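
/* Compile IPv4 source/destination addresses plus the common IP extension
 * fields.
 */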
void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
                        struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;
                __be32 tmp;

                flow_rule_match_ipv4_addrs(rule, &match);

                tmp = match.key->src & match.mask->src;
                ext->ipv4_src |= tmp & (~msk->ipv4_src);
                msk->ipv4_src |= match.mask->src;

                tmp = match.key->dst & match.mask->dst;
                ext->ipv4_dst |= tmp & (~msk->ipv4_dst);
                msk->ipv4_dst |= match.mask->dst;
        }

        nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
void
nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
                        struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;
                u8 tmp;
                int i;

                flow_rule_match_ipv6_addrs(rule, &match);
                for (i = 0; i < sizeof(ext->ipv6_src); i++) {
                        tmp = match.key->src.s6_addr[i] &
                              match.mask->src.s6_addr[i];
                        ext->ipv6_src.s6_addr[i] |= tmp &
                                                    (~msk->ipv6_src.s6_addr[i]);
                        msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i];

                        tmp = match.key->dst.s6_addr[i] &
                              match.mask->dst.s6_addr[i];
                        ext->ipv6_dst.s6_addr[i] |= tmp &
                                                    (~msk->ipv6_dst.s6_addr[i]);
                        msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
                }
        }

        nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
}
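
/* Copy the raw tunnel option TLVs (Geneve options) into the key. */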
void
nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule)
{
        struct flow_match_enc_opts match;
        int i;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
                flow_rule_match_enc_opts(rule, &match);

                for (i = 0; i < match.mask->len; i++) {
                        ext[i] |= match.key->data[i] & match.mask->data[i];
                        msk[i] |= match.mask->data[i];
                }
        }
}
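
/* Tunnel helpers: encode the encapsulation addresses, IP extension fields
 * and tunnel key/VNI used by the UDP and GRE tunnel match layers below.
 */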
static void
nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
                                  struct nfp_flower_tun_ipv4 *msk,
                                  struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_enc_ipv4_addrs(rule, &match);
                ext->src |= match.key->src & match.mask->src;
                ext->dst |= match.key->dst & match.mask->dst;
                msk->src |= match.mask->src;
                msk->dst |= match.mask->dst;
        }
}
static void
nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
                                  struct nfp_flower_tun_ipv6 *msk,
                                  struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;
                int i;

                flow_rule_match_enc_ipv6_addrs(rule, &match);
                for (i = 0; i < sizeof(ext->src); i++) {
                        ext->src.s6_addr[i] |= match.key->src.s6_addr[i] &
                                               match.mask->src.s6_addr[i];
                        ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] &
                                               match.mask->dst.s6_addr[i];
                        msk->src.s6_addr[i] |= match.mask->src.s6_addr[i];
                        msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i];
                }
        }
}
static void
nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
                              struct nfp_flower_tun_ip_ext *msk,
                              struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
                struct flow_match_ip match;

                flow_rule_match_enc_ip(rule, &match);
                ext->tos |= match.key->tos & match.mask->tos;
                ext->ttl |= match.key->ttl & match.mask->ttl;
                msk->tos |= match.mask->tos;
                msk->ttl |= match.mask->ttl;
        }
}
static void
nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
                               struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;
                u32 vni;

                flow_rule_match_enc_keyid(rule, &match);
                vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) <<
                      NFP_FL_TUN_VNI_OFFSET;
                *key |= cpu_to_be32(vni);
                vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
                *key_msk |= cpu_to_be32(vni);
        }
}
static void
nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
                               __be16 *flags_msk, struct flow_rule *rule)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                *key |= match.key->keyid & match.mask->keyid;
                *key_msk |= match.mask->keyid;

                *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
                *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
        }
}
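
/* Per tunnel type key builders: GRE (NVGRE) and UDP (VXLAN/Geneve) over
 * IPv4 or IPv6.
 */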
void
nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
                                struct nfp_flower_ipv4_gre_tun *msk,
                                struct flow_rule *rule)
{
        /* NVGRE is the only supported GRE tunnel type */
        ext->ethertype = cpu_to_be16(ETH_P_TEB);
        msk->ethertype = cpu_to_be16(~0);

        nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
                                       &ext->tun_flags, &msk->tun_flags, rule);
}
void
nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
                                struct nfp_flower_ipv4_udp_tun *msk,
                                struct flow_rule *rule)
{
        nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
void
nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
                                struct nfp_flower_ipv6_udp_tun *msk,
                                struct flow_rule *rule)
{
        nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
}
void
nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
                                struct nfp_flower_ipv6_gre_tun *msk,
                                struct flow_rule *rule)
{
        /* NVGRE is the only supported GRE tunnel type */
        ext->ethertype = cpu_to_be16(ETH_P_TEB);
        msk->ethertype = cpu_to_be16(~0);

        nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
        nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
        nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
                                       &ext->tun_flags, &msk->tun_flags, rule);
}
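
/* Build the complete match key and mask for a flow: walk the layers
 * selected in @key_ls, compiling each into the unmasked and mask buffers
 * and advancing through them layer by layer.
 */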
int nfp_flower_compile_flow_match(struct nfp_app *app,
                                  struct flow_rule *rule,
                                  struct nfp_fl_key_ls *key_ls,
                                  struct net_device *netdev,
                                  struct nfp_fl_payload *nfp_flow,
                                  enum nfp_flower_tun_type tun_type,
                                  struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        bool qinq_sup;
        u32 port_id;
        int ext_len;
        int err;
        u8 *ext;
        u8 *msk;

        port_id = nfp_flower_get_port_id_from_netdev(app, netdev);

        memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
        memset(nfp_flow->mask_data, 0, key_ls->key_size);

        ext = nfp_flow->unmasked_data;
        msk = nfp_flow->mask_data;

        qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

        nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
                                    (struct nfp_flower_meta_tci *)msk,
                                    rule, key_ls->key_layer, qinq_sup);
        ext += sizeof(struct nfp_flower_meta_tci);
        msk += sizeof(struct nfp_flower_meta_tci);

        /* Populate Extended Metadata if Required. */
        if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
                                            key_ls->key_layer_two);
                nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
                                            key_ls->key_layer_two);
                ext += sizeof(struct nfp_flower_ext_meta);
                msk += sizeof(struct nfp_flower_ext_meta);
        }

        /* Populate Exact Port data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
                                      port_id, false, tun_type, extack);
        if (err)
                return err;

        /* Populate Mask Port Data. */
        err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
                                      port_id, true, tun_type, extack);
        if (err)
                return err;

        ext += sizeof(struct nfp_flower_in_port);
        msk += sizeof(struct nfp_flower_in_port);

        if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
                err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext,
                                                  (struct nfp_flower_mac_mpls *)msk,
                                                  rule, extack);
                if (err)
                        return err;

                ext += sizeof(struct nfp_flower_mac_mpls);
                msk += sizeof(struct nfp_flower_mac_mpls);
        }

        if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
                nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
                                         (struct nfp_flower_tp_ports *)msk,
                                         rule);
                ext += sizeof(struct nfp_flower_tp_ports);
                msk += sizeof(struct nfp_flower_tp_ports);
        }

        if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
                nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
                                        (struct nfp_flower_ipv4 *)msk,
                                        rule);
                ext += sizeof(struct nfp_flower_ipv4);
                msk += sizeof(struct nfp_flower_ipv4);
        }

        if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
                nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
                                        (struct nfp_flower_ipv6 *)msk,
                                        rule);
                ext += sizeof(struct nfp_flower_ipv6);
                msk += sizeof(struct nfp_flower_ipv6);
        }

        if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
                nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
                                        (struct nfp_flower_vlan *)msk,
                                        rule);
                ext += sizeof(struct nfp_flower_vlan);
                msk += sizeof(struct nfp_flower_vlan);
        }

        if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
                        struct nfp_flower_ipv6_gre_tun *gre_match;
                        struct nfp_ipv6_addr_entry *entry;
                        struct in6_addr *dst;

                        nfp_flower_compile_ipv6_gre_tun((void *)ext,
                                                        (void *)msk, rule);
                        gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
                        dst = &gre_match->ipv6.dst;
                        ext += sizeof(struct nfp_flower_ipv6_gre_tun);
                        msk += sizeof(struct nfp_flower_ipv6_gre_tun);

                        entry = nfp_tunnel_add_ipv6_off(app, dst);
                        if (!entry)
                                return -EOPNOTSUPP;

                        nfp_flow->nfp_tun_ipv6 = entry;
                } else {
                        __be32 dst;

                        nfp_flower_compile_ipv4_gre_tun((void *)ext,
                                                        (void *)msk, rule);
                        dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
                        ext += sizeof(struct nfp_flower_ipv4_gre_tun);
                        msk += sizeof(struct nfp_flower_ipv4_gre_tun);

                        /* Store the tunnel destination in the rule data.
                         * This must be present and be an exact match.
                         */
                        nfp_flow->nfp_tun_ipv4_addr = dst;
                        nfp_tunnel_add_ipv4_off(app, dst);
                }
        }

        if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
            key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
                        struct nfp_flower_ipv6_udp_tun *udp_match;
                        struct nfp_ipv6_addr_entry *entry;
                        struct in6_addr *dst;

                        nfp_flower_compile_ipv6_udp_tun((void *)ext,
                                                        (void *)msk, rule);
                        udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
                        dst = &udp_match->ipv6.dst;
                        ext += sizeof(struct nfp_flower_ipv6_udp_tun);
                        msk += sizeof(struct nfp_flower_ipv6_udp_tun);

                        entry = nfp_tunnel_add_ipv6_off(app, dst);
                        if (!entry)
                                return -EOPNOTSUPP;

                        nfp_flow->nfp_tun_ipv6 = entry;
                } else {
                        __be32 dst;

                        nfp_flower_compile_ipv4_udp_tun((void *)ext,
                                                        (void *)msk, rule);
                        dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
                        ext += sizeof(struct nfp_flower_ipv4_udp_tun);
                        msk += sizeof(struct nfp_flower_ipv4_udp_tun);

                        /* Store the tunnel destination in the rule data.
                         * This must be present and be an exact match.
                         */
                        nfp_flow->nfp_tun_ipv4_addr = dst;
                        nfp_tunnel_add_ipv4_off(app, dst);
                }

                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
                        nfp_flower_compile_geneve_opt(ext, msk, rule);
                }
        }

        /* Check that the flow key does not exceed the maximum limit.
         * All structures in the key are multiples of 4 bytes, so use u32.
         */
        ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
        if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "unsupported offload: flow key too long");