// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
						 IP_TUNNEL_INFO_IPV6)
#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

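/* Every action helper below fills a struct nfp_fl_act_head with the firmware
 * opcode (jump_id) and the action length in 32-bit long words
 * (len_lw = act_size >> NFP_FL_LW_SIZ) before appending its payload.
 */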
static int
nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
                 const struct flow_action_entry *act,
                 struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_push_mpls);
        u32 mpls_lse = 0;

        push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
        push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        /* BOS is optional in the TC action but required for offload. */
        if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
                mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
        } else {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
                return -EOPNOTSUPP;
        }

        /* Leave MPLS TC as a default value of 0 if not explicitly set. */
        if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
                mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;

        /* Proto, label and TTL are enforced and verified for MPLS push. */
        mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
        mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
        push_mpls->ethtype = act->mpls_push.proto;
        push_mpls->lse = cpu_to_be32(mpls_lse);

        return 0;
}

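/* Example (illustrative only, not part of this file): a TC rule that would
 * exercise the MPLS push helper above, assuming an ingress representor eth0
 * and an egress representor eth1:
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           action mpls push protocol mpls_uc label 123 tc 0 bos 1 ttl 64 \
 *           action mirred egress redirect dev eth1
 */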
static void
nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
                const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_pop_mpls);

        pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
        pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        pop_mpls->ethtype = act->mpls_pop.proto;
}

static void
nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
                const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_set_mpls);
        u32 mpls_lse = 0, mpls_mask = 0;

        set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
        set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
                mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
                mpls_mask |= MPLS_LS_LABEL_MASK;
        }
        if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
                mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
                mpls_mask |= MPLS_LS_TC_MASK;
        }
        if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
                mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
                mpls_mask |= MPLS_LS_S_MASK;
        }
        if (act->mpls_mangle.ttl) {
                mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
                mpls_mask |= MPLS_LS_TTL_MASK;
        }

        set_mpls->lse = cpu_to_be32(mpls_lse);
        set_mpls->lse_mask = cpu_to_be32(mpls_mask);
}

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
        size_t act_size = sizeof(struct nfp_fl_pop_vlan);

        pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
        pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
                 const struct flow_action_entry *act)
{
        size_t act_size = sizeof(struct nfp_fl_push_vlan);
        u16 tmp_push_vlan_tci;

        push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
        push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
        push_vlan->reserved = 0;
        push_vlan->vlan_tpid = act->vlan.proto;

        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
                FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

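/* Insert a pre_lag action at the start of the action list. Returns the size
 * of the action added, 0 if the egress device is not a LAG master, or a
 * negative errno on failure.
 */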
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
               struct nfp_fl_payload *nfp_flow, int act_len,
               struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_pre_lag);
        struct nfp_fl_pre_lag *pre_lag;
        struct net_device *out_dev;
        int err;

        out_dev = act->dev;
        if (!out_dev || !netif_is_lag_master(out_dev))
                return 0;

        if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
                return -EOPNOTSUPP;
        }

        /* Pre_lag action must be first on action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(nfp_flow->action_data + act_size,
                        nfp_flow->action_data, act_len);

        pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
        err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
        if (err)
                return err;

        pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
        pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        return act_size;
}

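/* Build the output action. The egress device determines the port encoding:
 * a tunnel netdev, a LAG master, an internal port (pre-tunnel rule) or a
 * plain nfp representor are each handled differently below.
 */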
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
              const struct flow_action_entry *act,
              struct nfp_fl_payload *nfp_flow,
              bool last, struct net_device *in_dev,
              enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
              bool pkt_host, struct netlink_ext_ack *extack)
{
        size_t act_size = sizeof(struct nfp_fl_output);
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *out_dev;
        u16 tmp_flags;

        output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
        output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        out_dev = act->dev;
        if (!out_dev) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
                return -EOPNOTSUPP;
        }

        tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

        if (tun_type) {
                /* Verify the egress netdev matches the tunnel type. */
                if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
                        return -EOPNOTSUPP;
                }

                if (*tun_out_cnt) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
                        return -EOPNOTSUPP;
                }
                (*tun_out_cnt)++;

                output->flags = cpu_to_be16(tmp_flags |
                                            NFP_FL_OUT_FLAGS_USE_TUN);
                output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
        } else if (netif_is_lag_master(out_dev) &&
                   priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                int gid;

                output->flags = cpu_to_be16(tmp_flags);
                gid = nfp_flower_lag_get_output_id(app, out_dev);
                if (gid < 0) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
                        return gid;
                }
                output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
        } else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES) &&
                    !(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
                        return -EOPNOTSUPP;
                }

                if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
                        return -EOPNOTSUPP;
                }

                nfp_flow->pre_tun_rule.dev = out_dev;

                return 0;
        } else {
                /* Set action output parameters. */
                output->flags = cpu_to_be16(tmp_flags);

                if (nfp_netdev_is_nfp_repr(in_dev)) {
                        /* Confirm ingress and egress are on same device. */
                        if (!netdev_port_same_parent_id(in_dev, out_dev)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
                                return -EOPNOTSUPP;
                        }
                }

                if (!nfp_netdev_is_nfp_repr(out_dev)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
                        return -EOPNOTSUPP;
                }

                output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
                if (!output->port) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
                        return -EOPNOTSUPP;
                }
        }
        nfp_flow->meta.shortcut = output->port;

        return 0;
}

static bool
nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
{
        struct flow_action_entry *act = rule->action.entries;
        int num_act = rule->action.num_entries;
        int act_idx;

        /* Preparse action list for next mirred or redirect action */
        for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
                if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
                    act[act_idx].id == FLOW_ACTION_MIRRED)
                        return netif_is_gretap(act[act_idx].dev) ||
                               netif_is_ip6gretap(act[act_idx].dev);

        return false;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
                        struct flow_rule *rule,
                        const struct flow_action_entry *act, int act_idx)
{
        const struct ip_tunnel_info *tun = act->tunnel;
        struct nfp_flower_priv *priv = app->priv;

        /* Determine the tunnel type based on the egress netdev
         * in the mirred action for tunnels without l4.
         */
        if (nfp_flower_tun_is_gre(rule, act_idx))
                return NFP_FL_TUNNEL_GRE;

        switch (tun->key.tp_dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                return NFP_FL_TUNNEL_VXLAN;
        case htons(GENEVE_UDP_PORT):
                if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
                        return NFP_FL_TUNNEL_GENEVE;
                fallthrough;
        default:
                return NFP_FL_TUNNEL_NONE;
        }
}

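/* Example (illustrative only, not part of this file): a VXLAN encap rule that
 * walks the pre-tunnel/set-tunnel path below, assuming representor eth0 and
 * a vxlan netdev vxlan0:
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *           id 42 dst_port 4789 \
 *           action mirred egress redirect dev vxlan0
 */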
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
        size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
        struct nfp_fl_pre_tunnel *pre_tun_act;

        /* Pre_tunnel action must be first on action list.
         * If other actions already exist they need to be pushed forward.
         */
        if (act_len)
                memmove(act_data + act_size, act_data, act_len);

        pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

        memset(pre_tun_act, 0, act_size);

        pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
        pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
                           const struct flow_action_entry *act,
                           struct netlink_ext_ack *extack)
{
        struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
        int opt_len, opt_cnt, act_start, tot_push_len;
        u8 *src = ip_tunnel_info_opts(ip_tun);

        /* We need to populate the options in reverse order for HW.
         * Therefore we go through the options, calculating the
         * number of options and the total size, then we populate
         * them in reverse order in the action list.
         */
        opt_cnt = 0;
        tot_push_len = 0;
        opt_len = ip_tun->options_len;
        while (opt_len > 0) {
                struct geneve_opt *opt = (struct geneve_opt *)src;

                opt_cnt++;
                if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
                        return -EOPNOTSUPP;
                }

                tot_push_len += sizeof(struct nfp_fl_push_geneve) +
                                opt->length * 4;
                if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
                        return -EOPNOTSUPP;
                }

                opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
                return -EOPNOTSUPP;
        }

        act_start = *list_len;
        *list_len += tot_push_len;
        src = ip_tunnel_info_opts(ip_tun);
        while (opt_cnt) {
                struct geneve_opt *opt = (struct geneve_opt *)src;
                struct nfp_fl_push_geneve *push;
                size_t act_size, len;

                opt_cnt--;
                act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
                tot_push_len -= act_size;
                len = act_start + tot_push_len;

                push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
                push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
                push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
                push->reserved = 0;
                push->class = opt->opt_class;
                push->type = opt->type;
                push->length = opt->length;
                memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

                src += sizeof(struct geneve_opt) + opt->length * 4;
        }

        return 0;
}

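/* Build-time check that the firmware's NFP_FL_TUNNEL_* values still match the
 * kernel's IP_TUNNEL_* flags once converted to their be16 representation, as
 * noted above these kernel flags are not ABI.
 */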
#define NFP_FL_CHECK(flag) ({						\
	IP_TUNNEL_DECLARE_FLAGS(__check) = { };				\
	__be16 __res;							\
									\
	__set_bit(IP_TUNNEL_##flag##_BIT, __check);			\
	__res = ip_tunnel_flags_to_be16(__check);			\
									\
	BUILD_BUG_ON(__builtin_constant_p(__res) &&			\
		     NFP_FL_TUNNEL_##flag != __res);			\
})

static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
               const struct flow_action_entry *act,
               struct nfp_fl_pre_tunnel *pre_tun,
               enum nfp_flower_tun_type tun_type,
               struct net_device *netdev, struct netlink_ext_ack *extack)
{
        const struct ip_tunnel_info *ip_tun = act->tunnel;
        bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
        size_t act_size = sizeof(struct nfp_fl_set_tun);
        struct nfp_flower_priv *priv = app->priv;
        u32 tmp_set_ip_tun_type_index = 0;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;
        __be16 tun_flags;

        if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
                return -EOPNOTSUPP;

        if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
                return -EOPNOTSUPP;

        NFP_FL_CHECK(CSUM);
        NFP_FL_CHECK(KEY);
        NFP_FL_CHECK(GENEVE_OPT);

        if (ip_tun->options_len &&
            (tun_type != NFP_FL_TUNNEL_GENEVE ||
            !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
                return -EOPNOTSUPP;
        }

        tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags);
        if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) ||
            (tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "unsupported offload: loaded firmware does not support tunnel flag offload");
                return -EOPNOTSUPP;
        }

        set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
        set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        /* Set tunnel type and pre-tunnel index. */
        tmp_set_ip_tun_type_index |=
                FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
                FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);

        set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
        if (tun_flags & NFP_FL_TUNNEL_KEY)
                set_tun->tun_id = ip_tun->key.tun_id;

        if (ip_tun->key.ttl) {
                set_tun->ttl = ip_tun->key.ttl;
#ifdef CONFIG_IPV6
        } else if (ipv6) {
                struct net *net = dev_net(netdev);
                struct flowi6 flow = {};
                struct dst_entry *dst;

                flow.daddr = ip_tun->key.u.ipv6.dst;
                flow.flowi6_proto = IPPROTO_UDP;
                dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
                if (!IS_ERR(dst)) {
                        set_tun->ttl = ip6_dst_hoplimit(dst);
                        dst_release(dst);
                } else {
                        set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit);
                }
#endif
        } else {
                struct net *net = dev_net(netdev);
                struct flowi4 flow = {};
                struct rtable *rt;
                int err;

                /* Do a route lookup to determine ttl - if fails then use
                 * default. Note that CONFIG_INET is a requirement of
                 * CONFIG_NET_SWITCHDEV so must be defined here.
                 */
                flow.daddr = ip_tun->key.u.ipv4.dst;
                flow.flowi4_proto = IPPROTO_UDP;
                rt = ip_route_output_key(net, &flow);
                err = PTR_ERR_OR_ZERO(rt);
                if (!err) {
                        set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
                        ip_rt_put(rt);
                } else {
                        set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
                }
        }

        set_tun->tos = ip_tun->key.tos;
        set_tun->tun_flags = tun_flags;

        if (tun_type == NFP_FL_TUNNEL_GENEVE) {
                set_tun->tun_proto = htons(ETH_P_TEB);
                set_tun->tun_len = ip_tun->options_len / 4;
        }

        /* Complete pre_tunnel action. */
        if (ipv6) {
                pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
                pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
        } else {
                pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
        }

        return 0;
}

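/* Merge a 32-bit pedit value/mask pair into the accumulated exact and mask
 * bytes, preserving previously programmed bits outside the new mask.
 */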
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
        u32 oldvalue = get_unaligned((u32 *)p_exact);
        u32 oldmask = get_unaligned((u32 *)p_mask);

        value &= mask;
        value |= oldvalue & ~mask;

        put_unaligned(oldmask | mask, (u32 *)p_mask);
        put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
        u32 exact, mask;

        if (off + 4 > ETH_ALEN * 2) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
                return -EOPNOTSUPP;
        }

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
                return -EOPNOTSUPP;
        }

        nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
                            &set_eth->eth_addr_mask[off]);

        set_eth->reserved = cpu_to_be16(0);
        set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
        set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

        return 0;
}

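/* Layout of the 32-bit word of the IPv4 header that carries ttl, protocol
 * and checksum, used to validate pedit masks below.
 */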
struct ipv4_ttl_word {
        __u8 ttl;
        __u8 protocol;
        __sum16 check;
};

static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ip4_addrs *set_ip_addr,
               struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
               struct netlink_ext_ack *extack)
{
        struct ipv4_ttl_word *ttl_word_mask;
        struct ipv4_ttl_word *ttl_word;
        struct iphdr *tos_word_mask;
        struct iphdr *tos_word;
        __be32 exact, mask;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
                return -EOPNOTSUPP;
        }

        switch (off) {
        case offsetof(struct iphdr, daddr):
                set_ip_addr->ipv4_dst_mask |= mask;
                set_ip_addr->ipv4_dst &= ~mask;
                set_ip_addr->ipv4_dst |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, saddr):
                set_ip_addr->ipv4_src_mask |= mask;
                set_ip_addr->ipv4_src &= ~mask;
                set_ip_addr->ipv4_src |= exact & mask;
                set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
                set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
                                           NFP_FL_LW_SIZ;
                break;
        case offsetof(struct iphdr, ttl):
                ttl_word_mask = (struct ipv4_ttl_word *)&mask;
                ttl_word = (struct ipv4_ttl_word *)&exact;

                if (ttl_word_mask->protocol || ttl_word_mask->check) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
                        return -EOPNOTSUPP;
                }

                set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
                set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        case round_down(offsetof(struct iphdr, tos), 4):
                tos_word_mask = (struct iphdr *)&mask;
                tos_word = (struct iphdr *)&exact;

                if (tos_word_mask->version || tos_word_mask->ihl ||
                    tos_word_mask->tot_len) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
                        return -EOPNOTSUPP;
                }

                set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
                set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
                set_ip_ttl_tos->head.jump_id =
                        NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
                set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
                                              NFP_FL_LW_SIZ;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
                return -EOPNOTSUPP;
        }

        return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
                      struct nfp_fl_set_ipv6_addr *ip6)
{
        ip6->ipv6[word].mask |= mask;
        ip6->ipv6[word].exact &= ~mask;
        ip6->ipv6[word].exact |= exact & mask;

        ip6->reserved = cpu_to_be16(0);
        ip6->head.jump_id = opcode_tag;
        ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

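/* Layout of the IPv6 header word carrying payload_len, nexthdr and hop_limit,
 * used to validate pedit masks below.
 */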
struct ipv6_hop_limit_word {
        __be16 payload_len;
        u8 nexthdr;
        u8 hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
                                    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
                                    struct netlink_ext_ack *extack)
{
        struct ipv6_hop_limit_word *fl_hl_mask;
        struct ipv6_hop_limit_word *fl_hl;

        switch (off) {
        case offsetof(struct ipv6hdr, payload_len):
                fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
                fl_hl = (struct ipv6_hop_limit_word *)&exact;

                if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
                        return -EOPNOTSUPP;
                }

                ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
                ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
                                            fl_hl_mask->hop_limit;
                break;
        case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
                if (mask & ~IPV6_FLOWINFO_MASK ||
                    exact & ~IPV6_FLOWINFO_MASK) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow info action");
                        return -EOPNOTSUPP;
                }

                ip_hl_fl->ipv6_label_mask |= mask;
                ip_hl_fl->ipv6_label &= ~mask;
                ip_hl_fl->ipv6_label |= exact & mask;
                break;
        }

        ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
        ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

        return 0;
}

static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
               struct nfp_fl_set_ipv6_addr *ip_dst,
               struct nfp_fl_set_ipv6_addr *ip_src,
               struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
               struct netlink_ext_ack *extack)
{
        __be32 exact, mask;
        int err = 0;
        u8 word;

        /* We are expecting tcf_pedit to return a big endian value */
        mask = (__force __be32)~act->mangle.mask;
        exact = (__force __be32)act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
                return -EOPNOTSUPP;
        }

        if (off < offsetof(struct ipv6hdr, saddr)) {
                err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
                                                          ip_hl_fl, extack);
        } else if (off < offsetof(struct ipv6hdr, daddr)) {
                word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
                                      exact, mask, ip_src);
        } else if (off < offsetof(struct ipv6hdr, daddr) +
                         sizeof(struct in6_addr)) {
                word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
                nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
                                      exact, mask, ip_dst);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
                return -EOPNOTSUPP;
        }

        return err;
}

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
                 struct nfp_fl_set_tport *set_tport, int opcode,
                 struct netlink_ext_ack *extack)
{
        u32 exact, mask;

        if (off) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
                return -EOPNOTSUPP;
        }

        mask = ~act->mangle.mask;
        exact = act->mangle.val;

        if (exact & ~mask) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
                return -EOPNOTSUPP;
        }

        nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
                            set_tport->tp_port_mask);

        set_tport->reserved = cpu_to_be16(0);
        set_tport->head.jump_id = opcode;
        set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

        return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
        switch (ip_proto) {
        case 0:
                /* Filter doesn't force proto match,
                 * both TCP and UDP will be updated if encountered
                 */
                return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
        case IPPROTO_TCP:
                return TCA_CSUM_UPDATE_FLAG_TCP;
        case IPPROTO_UDP:
                return TCA_CSUM_UPDATE_FLAG_UDP;
        default:
                /* All other protocols will be ignored by FW */
                return 0;
        }
}

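/* Scratch space used to coalesce a run of consecutive pedit actions before
 * they are committed to the firmware action list in nfp_fl_commit_mangle().
 */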
struct nfp_flower_pedit_acts {
        struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
        struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
        struct nfp_fl_set_ip4_addrs set_ip_addr;
        struct nfp_fl_set_tport set_tport;
        struct nfp_fl_set_eth set_eth;
};

static int
nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action,
                     int *a_len, struct nfp_flower_pedit_acts *set_act,
                     u32 *csum_updated)
{
        size_t act_size = 0;
        u8 ip_proto = 0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ip_proto = match.key->ip_proto;
        }

        if (set_act->set_eth.head.len_lw) {
                act_size = sizeof(set_act->set_eth);
                memcpy(nfp_action, &set_act->set_eth, act_size);
                *a_len += act_size;
        }

        if (set_act->set_ip_ttl_tos.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_ttl_tos);
                memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                 nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip_addr.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip_addr);
                memcpy(nfp_action, &set_act->set_ip_addr, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
                *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                                 nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_tc_hl_fl);
                memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        if (set_act->set_ip6_dst.head.len_lw &&
            set_act->set_ip6_src.head.len_lw) {
                /* TC compiles set src and dst IPv6 address as a single action,
                 * the hardware requires this to be 2 separate actions.
                 */
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
                       &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_dst.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_dst);
                memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        } else if (set_act->set_ip6_src.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_ip6_src);
                memcpy(nfp_action, &set_act->set_ip6_src, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }
        if (set_act->set_tport.head.len_lw) {
                nfp_action += act_size;
                act_size = sizeof(set_act->set_tport);
                memcpy(nfp_action, &set_act->set_tport, act_size);
                *a_len += act_size;

                /* Hardware will automatically fix TCP/UDP checksum. */
                *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
        }

        return 0;
}

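/* Dispatch a single pedit (FLOW_ACTION_MANGLE) entry to the per-header helper
 * based on the mangle header type.
 */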
static int
nfp_fl_pedit(const struct flow_action_entry *act,
             char *nfp_action, int *a_len,
             u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
             struct netlink_ext_ack *extack)
{
        enum flow_action_mangle_base htype;
        u32 offset;

        htype = act->mangle.htype;
        offset = act->mangle.offset;

        switch (htype) {
        case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
                return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
                return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
                                      &set_act->set_ip_ttl_tos, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
                return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
                                      &set_act->set_ip6_src,
                                      &set_act->set_ip6_tc_hl_fl, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_TCP, extack);
        case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
                return nfp_fl_set_tport(act, offset, &set_act->set_tport,
                                        NFP_FL_ACTION_OPCODE_SET_UDP, extack);
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
                return -EOPNOTSUPP;
        }
}

static struct nfp_fl_meter *nfp_fl_meter(char *act_data)
{
        size_t act_size = sizeof(struct nfp_fl_meter);
        struct nfp_fl_meter *meter_act;

        meter_act = (struct nfp_fl_meter *)act_data;

        memset(meter_act, 0, act_size);

        meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER;
        meter_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

        return meter_act;
}

static int
nfp_flower_meter_action(struct nfp_app *app,
                        const struct flow_action_entry *action,
                        struct nfp_fl_payload *nfp_fl, int *a_len,
                        struct net_device *netdev,
                        struct netlink_ext_ack *extack)
{
        struct nfp_fl_meter *fl_meter;
        u32 meter_id;

        if (*a_len + sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "unsupported offload: meter action size beyond the allowed maximum");
                return -EOPNOTSUPP;
        }

        meter_id = action->hw_index;
        if (!nfp_flower_search_meter_entry(app, meter_id)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "cannot offload flow table with unsupported police action");
                return -EOPNOTSUPP;
        }

        fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]);
        *a_len += sizeof(struct nfp_fl_meter);
        fl_meter->meter_id = cpu_to_be32(meter_id);

        return 0;
}

static int
nfp_flower_output_action(struct nfp_app *app,
                         const struct flow_action_entry *act,
                         struct nfp_fl_payload *nfp_fl, int *a_len,
                         struct net_device *netdev, bool last,
                         enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                         int *out_cnt, u32 *csum_updated, bool pkt_host,
                         struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_output *output;
        int err, prelag_size;

        /* If csum_updated has not been reset by now, it means HW will
         * incorrectly update csums when they are not requested.
         */
        if (*csum_updated) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
                return -EOPNOTSUPP;
        }

        if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
                return -EOPNOTSUPP;
        }

        output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
        err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
                            tun_out_cnt, pkt_host, extack);
        if (err)
                return err;

        *a_len += sizeof(struct nfp_fl_output);

        if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                /* nfp_fl_pre_lag returns -err or size of prelag action added.
                 * This will be 0 if it is not egressing to a lag dev.
                 */
                prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
                if (prelag_size < 0) {
                        return prelag_size;
                } else if (prelag_size > 0 && (!last || *out_cnt)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
                        return -EOPNOTSUPP;
                }

                *a_len += prelag_size;
        }
        (*out_cnt)++;

        return 0;
}

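/* Translate one TC flow_action entry into zero or more firmware actions,
 * appending them to nfp_fl->action_data and advancing *a_len.
 */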
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
                       struct flow_rule *rule,
                       struct nfp_fl_payload *nfp_fl, int *a_len,
                       struct net_device *netdev,
                       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
                       int *out_cnt, u32 *csum_updated,
                       struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
                       struct netlink_ext_ack *extack, int act_idx)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_set_tun *set_tun;
        struct nfp_fl_push_vlan *psh_v;
        struct nfp_fl_push_mpls *psh_m;
        struct nfp_fl_pop_vlan *pop_v;
        struct nfp_fl_pop_mpls *pop_m;
        struct nfp_fl_set_mpls *set_m;
        int err;

        switch (act->id) {
        case FLOW_ACTION_DROP:
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
                break;
        case FLOW_ACTION_REDIRECT_INGRESS:
        case FLOW_ACTION_REDIRECT:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               true, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated, *pkt_host,
                                               extack);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_MIRRED_INGRESS:
        case FLOW_ACTION_MIRRED:
                err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
                                               false, tun_type, tun_out_cnt,
                                               out_cnt, csum_updated, *pkt_host,
                                               extack);
                if (err)
                        return err;
                break;
        case FLOW_ACTION_VLAN_POP:
                if (*a_len +
                    sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
                        return -EOPNOTSUPP;
                }

                pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

                nfp_fl_pop_vlan(pop_v);
                *a_len += sizeof(struct nfp_fl_pop_vlan);
                break;
        case FLOW_ACTION_VLAN_PUSH:
                if (*a_len +
                    sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
                        return -EOPNOTSUPP;
                }

                psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_push_vlan(psh_v, act);
                *a_len += sizeof(struct nfp_fl_push_vlan);
                break;
        case FLOW_ACTION_TUNNEL_ENCAP: {
                const struct ip_tunnel_info *ip_tun = act->tunnel;

                *tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx);
                if (*tun_type == NFP_FL_TUNNEL_NONE) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
                        return -EOPNOTSUPP;
                }

                if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
                        return -EOPNOTSUPP;
                }

                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
                 */
                if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
                    sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
                        return -EOPNOTSUPP;
                }

                pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
                *a_len += sizeof(struct nfp_fl_pre_tunnel);

                err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
                if (err)
                        return err;

                set_tun = (void *)&nfp_fl->action_data[*a_len];
                err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
                                     netdev, extack);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_set_tun);
                }
                break;
        case FLOW_ACTION_TUNNEL_DECAP:
                /* Tunnel decap is handled by default so accept action. */
                return 0;
        case FLOW_ACTION_MANGLE:
                if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len],
                                 a_len, csum_updated, set_act, extack))
                        return -EOPNOTSUPP;
                break;
        case FLOW_ACTION_CSUM:
                /* csum action requests recalc of something we have not fixed */
                if (act->csum_flags & ~*csum_updated) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
                        return -EOPNOTSUPP;
                }
                /* If we will correctly fix the csum we can remove it from the
                 * csum update list. Which will later be used to check support.
                 */
                *csum_updated &= ~act->csum_flags;
                break;
        case FLOW_ACTION_MPLS_PUSH:
                if (*a_len +
                    sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
                        return -EOPNOTSUPP;
                }

                psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                err = nfp_fl_push_mpls(psh_m, act, extack);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_push_mpls);
                break;
        case FLOW_ACTION_MPLS_POP:
                if (*a_len +
                    sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
                        return -EOPNOTSUPP;
                }

                pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_pop_mpls(pop_m, act);
                *a_len += sizeof(struct nfp_fl_pop_mpls);
                break;
        case FLOW_ACTION_MPLS_MANGLE:
                if (*a_len +
                    sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
                        return -EOPNOTSUPP;
                }

                set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
                nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

                nfp_fl_set_mpls(set_m, act);
                *a_len += sizeof(struct nfp_fl_set_mpls);
                break;
        case FLOW_ACTION_PTYPE:
                /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
                if (act->ptype != PACKET_HOST)
                        return -EOPNOTSUPP;

                *pkt_host = true;
                break;
        case FLOW_ACTION_POLICE:
                if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported offload: unsupported police action in action list");
                        return -EOPNOTSUPP;
                }

                err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
                                              extack);
                if (err)
                        return err;
                break;
        default:
                /* Currently we do not handle any other actions. */
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
                return -EOPNOTSUPP;
        }

        return 0;
}

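/* A run of consecutive FLOW_ACTION_MANGLE entries is accumulated into one
 * nfp_flower_pedit_acts block; these two helpers detect the run boundaries.
 */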
static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
                                      int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry prev_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        if (current_act_idx == 0)
                return true;

        prev_act = flow_act->entries[current_act_idx - 1];

        return prev_act.id != FLOW_ACTION_MANGLE;
}

static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
                                    int current_act_idx)
{
        struct flow_action_entry current_act;
        struct flow_action_entry next_act;

        current_act = flow_act->entries[current_act_idx];
        if (current_act.id != FLOW_ACTION_MANGLE)
                return false;

        if (current_act_idx == flow_act->num_entries)
                return true;

        next_act = flow_act->entries[current_act_idx + 1];

        return next_act.id != FLOW_ACTION_MANGLE;
}

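/* Entry point called from the flower offload path: walks the rule's action
 * list, compiles each entry into firmware actions and records the final
 * action length in the flow metadata.
 */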
int nfp_flower_compile_action(struct nfp_app *app,
                              struct flow_rule *rule,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow,
                              struct netlink_ext_ack *extack)
{
        int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
        struct nfp_flower_pedit_acts set_act;
        enum nfp_flower_tun_type tun_type;
        struct flow_action_entry *act;
        bool pkt_host = false;
        u32 csum_updated = 0;

        if (!flow_action_hw_stats_check(&rule->action, extack,
                                        FLOW_ACTION_HW_STATS_DELAYED_BIT))
                return -EOPNOTSUPP;

        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
        tun_type = NFP_FL_TUNNEL_NONE;
        act_len = 0;
        act_cnt = 0;
        tun_out_cnt = 0;
        out_cnt = 0;

        flow_action_for_each(i, act, &rule->action) {
                if (nfp_fl_check_mangle_start(&rule->action, i))
                        memset(&set_act, 0, sizeof(set_act));
                err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
                                             &out_cnt, &csum_updated,
                                             &set_act, &pkt_host, extack, i);
                if (err)
                        return err;
                act_cnt++;
                if (nfp_fl_check_mangle_end(&rule->action, i))
                        nfp_fl_commit_mangle(rule,
                                             &nfp_flow->action_data[act_len],
                                             &act_len, &set_act, &csum_updated);
        }

        /* We optimise when the action list is small, this can unfortunately
         * not happen once we have more than one action in the action list.
         */
        if (act_cnt > 1)
                nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

        nfp_flow->meta.act_len = act_len;

        return 0;
}
;