// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
						 IP_TUNNEL_INFO_IPV6)
#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)
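/* Build a firmware push-MPLS action from the TC flow_action entry.  The BOS
 * bit must be set explicitly in the TC action; label, TTL and EtherType are
 * copied through, and MPLS TC defaults to 0 when not set.
 */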
static int
nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
		 const struct flow_action_entry *act,
		 struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_push_mpls);
	u32 mpls_lse = 0;

	push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
	push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* BOS is optional in the TC action but required for offload. */
	if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
		return -EOPNOTSUPP;
	}

	/* Leave MPLS TC as a default value of 0 if not explicitly set. */
	if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
		mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;

	/* Proto, label and TTL are enforced and verified for MPLS push. */
	mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
	mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
	push_mpls->ethtype = act->mpls_push.proto;
	push_mpls->lse = cpu_to_be32(mpls_lse);

	return 0;
}
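/* Pop-MPLS only records the restored EtherType; set-MPLS builds a label
 * stack entry value/mask pair covering just the fields the TC mangle action
 * actually modifies.
 */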
static void
nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_pop_mpls);

	pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
	pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_mpls->ethtype = act->mpls_pop.proto;
}
static void
nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_set_mpls);
	u32 mpls_lse = 0, mpls_mask = 0;

	set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
	set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
		mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
		mpls_mask |= MPLS_LS_LABEL_MASK;
	}
	if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
		mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
		mpls_mask |= MPLS_LS_TC_MASK;
	}
	if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
		mpls_mask |= MPLS_LS_S_MASK;
	}
	if (act->mpls_mangle.ttl) {
		mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
		mpls_mask |= MPLS_LS_TTL_MASK;
	}

	set_mpls->lse = cpu_to_be32(mpls_lse);
	set_mpls->lse_mask = cpu_to_be32(mpls_mask);
}
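/* VLAN pop needs only the opcode header; VLAN push additionally copies the
 * TPID and packs PCP and VID into the TCI with FIELD_PREP().
 */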
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = act->vlan.proto;

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
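/* When the egress device is a LAG master, a pre-LAG action must sit at the
 * front of the action list; existing actions are shifted to make room and
 * the group information is filled in by the flower LAG handler.
 */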
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
	       struct nfp_fl_payload *nfp_flow, int act_len,
	       struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = act->dev;
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
		return -EOPNOTSUPP;
	}

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}
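/* Translate a mirred/redirect action into a firmware output action.  The
 * egress port encoding depends on whether the destination is a tunnel
 * netdev, a LAG master, an internal port (pre-tunnel rule) or an NFP
 * representor.
 */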
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct flow_action_entry *act,
	      struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
	      bool pkt_host, struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = act->dev;
	if (!out_dev) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
		return -EOPNOTSUPP;
	}

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
			return -EOPNOTSUPP;
		}

		if (*tun_out_cnt) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
			return -EOPNOTSUPP;
		}
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
			return gid;
		}
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
			return -EOPNOTSUPP;
		}

		if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
			return -EOPNOTSUPP;
		}

		nfp_flow->pre_tun_rule.dev = out_dev;

		return 0;
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		if (nfp_netdev_is_nfp_repr(in_dev)) {
			/* Confirm ingress and egress are on same device. */
			if (!netdev_port_same_parent_id(in_dev, out_dev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
				return -EOPNOTSUPP;
			}
		}

		if (!nfp_netdev_is_nfp_repr(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
			return -EOPNOTSUPP;
		}

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
			return -EOPNOTSUPP;
		}
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}
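/* GRE tunnels carry no L4 destination port, so the tunnel type is inferred
 * from the egress netdev of the following mirred/redirect action instead of
 * from tp_dst.
 */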
static bool
nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
{
	struct flow_action_entry *act = flow->rule->action.entries;
	int num_act = flow->rule->action.num_entries;
	int act_idx;

	/* Preparse action list for next mirred or redirect action */
	for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
		if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
		    act[act_idx].id == FLOW_ACTION_MIRRED)
			return netif_is_gretap(act[act_idx].dev);

	return false;
}
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
			struct flow_cls_offload *flow,
			const struct flow_action_entry *act, int act_idx)
{
	const struct ip_tunnel_info *tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;

	/* Determine the tunnel type based on the egress netdev
	 * in the mirred action for tunnels without l4.
	 */
	if (nfp_flower_tun_is_gre(flow, act_idx))
		return NFP_FL_TUNNEL_GRE;

	switch (tun->key.tp_dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(GENEVE_UDP_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		fallthrough;
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}
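/* The pre-tunnel action reserves the first slot of the action list so the
 * firmware can check for a next-hop entry before any other action runs.
 */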
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}
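/* Geneve options are measured in a first pass (count and total length are
 * bounded), then written into the action list in reverse order because that
 * is the layout the hardware expects.
 */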
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
			return -EOPNOTSUPP;
		}

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
			return -EOPNOTSUPP;
		}

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
		return -EOPNOTSUPP;
	}

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
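/* Build the set-tunnel action: encode tunnel type and pre-tunnel index, copy
 * the tunnel key, and derive the TTL from the key or, failing that, from a
 * route lookup with a sysctl/devconf fallback.
 */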
static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
	       const struct flow_action_entry *act,
	       struct nfp_fl_pre_tunnel *pre_tun,
	       enum nfp_flower_tun_type tun_type,
	       struct net_device *netdev, struct netlink_ext_ack *extack)
{
	const struct ip_tunnel_info *ip_tun = act->tunnel;
	bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
	size_t act_size = sizeof(struct nfp_fl_set_tun);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
		return -EOPNOTSUPP;

	if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
		return -EOPNOTSUPP;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
		return -EOPNOTSUPP;
	}

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
#ifdef CONFIG_IPV6
	} else if (ipv6) {
		struct net *net = dev_net(netdev);
		struct flowi6 flow = {};
		struct dst_entry *dst;

		flow.daddr = ip_tun->key.u.ipv6.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
		if (!IS_ERR(dst)) {
			set_tun->ttl = ip6_dst_hoplimit(dst);
			dst_release(dst);
		} else {
			set_tun->ttl = net->ipv6.devconf_all->hop_limit;
		}
#endif
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if fails then use
		 * default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
		return -EOPNOTSUPP;
	}
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	if (ipv6) {
		pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
		pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
	} else {
		pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
	}

	return 0;
}
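/* Merge a 32-bit pedit value/mask pair into the accumulated exact and mask
 * bytes, preserving bits already claimed by earlier pedit keys.
 */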
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}
static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}
struct ipv4_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};
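/* Accumulate IPv4 pedit keys: address words go into the SET_IPV4_ADDRS
 * action, while ttl and tos edits are validated against the adjacent header
 * fields and go into the SET_IPV4_TTL_TOS action.
 */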
static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
	       struct netlink_ext_ack *extack)
{
	struct ipv4_ttl_word *ttl_word_mask;
	struct ipv4_ttl_word *ttl_word;
	struct iphdr *tos_word_mask;
	struct iphdr *tos_word;
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
		return -EOPNOTSUPP;
	}

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, ttl):
		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
		ttl_word = (struct ipv4_ttl_word *)&exact;

		if (ttl_word_mask->protocol || ttl_word_mask->check) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	case round_down(offsetof(struct iphdr, tos), 4):
		tos_word_mask = (struct iphdr *)&mask;
		tos_word = (struct iphdr *)&exact;

		if (tos_word_mask->version || tos_word_mask->ihl ||
		    tos_word_mask->tot_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
		return -EOPNOTSUPP;
	}

	return 0;
}
static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}
struct ipv6_hop_limit_word {
	__be16	payload_len;
	u8	nexthdr;
	u8	hop_limit;
};
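/* Handle pedit writes to the first and second 32-bit words of the IPv6
 * header: hop limit edits must not touch nexthdr or payload_len, and flow
 * label edits are restricted to the flow label bits.
 */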
static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
				    struct netlink_ext_ack *extack)
{
	struct ipv6_hop_limit_word *fl_hl_mask;
	struct ipv6_hop_limit_word *fl_hl;

	switch (off) {
	case offsetof(struct ipv6hdr, payload_len):
		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
		fl_hl = (struct ipv6_hop_limit_word *)&exact;

		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
					    fl_hl_mask->hop_limit;
		break;
	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
		if (mask & ~IPV6_FLOW_LABEL_MASK ||
		    exact & ~IPV6_FLOW_LABEL_MASK) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_label_mask |= mask;
		ip_hl_fl->ipv6_label &= ~mask;
		ip_hl_fl->ipv6_label |= exact & mask;
		break;
	}

	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

	return 0;
}
static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src,
	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
	       struct netlink_ext_ack *extack)
{
	__be32 exact, mask;
	int err = 0;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
		return -EOPNOTSUPP;
	}

	if (off < offsetof(struct ipv6hdr, saddr)) {
		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
							  ip_hl_fl, extack);
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		   sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
		return -EOPNOTSUPP;
	}

	return err;
}
static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode,
		 struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}
static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match,
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}
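/* Scratch space for one run of consecutive pedit actions; the accumulated
 * set actions are flushed into the action list by nfp_fl_commit_mangle().
 */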
struct nfp_flower_pedit_acts {
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
};
static int
nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
		     int *a_len, struct nfp_flower_pedit_acts *set_act,
		     u32 *csum_updated)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	size_t act_size = 0;
	u8 ip_proto = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (set_act->set_eth.head.len_lw) {
		act_size = sizeof(set_act->set_eth);
		memcpy(nfp_action, &set_act->set_eth, act_size);
		*a_len += act_size;
	}

	if (set_act->set_ip_ttl_tos.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_ttl_tos);
		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_addr);
		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_dst.head.len_lw &&
	    set_act->set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
		       &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_act->set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_tport);
		memcpy(nfp_action, &set_act->set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}
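/* Dispatch a single pedit key to the handler for the header type it
 * modifies; the result is accumulated in set_act rather than written to the
 * action list directly.
 */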
static int
nfp_fl_pedit(const struct flow_action_entry *act,
	     struct flow_cls_offload *flow, char *nfp_action, int *a_len,
	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
	     struct netlink_ext_ack *extack)
{
	enum flow_action_mangle_base htype;
	u32 offset;

	htype = act->mangle.htype;
	offset = act->mangle.offset;

	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
				      &set_act->set_ip_ttl_tos, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
				      &set_act->set_ip6_src,
				      &set_act->set_ip6_tc_hl_fl, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_TCP, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_UDP, extack);
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
		return -EOPNOTSUPP;
	}
}
static int
nfp_flower_output_action(struct nfp_app *app,
			 const struct flow_action_entry *act,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated, bool pkt_host,
			 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
		return -EOPNOTSUPP;
	}

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt, pkt_host, extack);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
		if (prelag_size < 0) {
			return prelag_size;
		} else if (prelag_size > 0 && (!last || *out_cnt)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
			return -EOPNOTSUPP;
		}

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}
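/* Translate one TC action into its firmware representation, growing the
 * action list and bounds-checking it as it goes.  Any action id the driver
 * does not handle is rejected with -EOPNOTSUPP.
 */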
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
		       struct flow_cls_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated,
		       struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
		       struct netlink_ext_ack *extack, int act_idx)
{
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tun *set_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_push_mpls *psh_m;
	struct nfp_fl_pop_vlan *pop_v;
	struct nfp_fl_pop_mpls *pop_m;
	struct nfp_fl_set_mpls *set_m;
	int err;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
		break;
	case FLOW_ACTION_REDIRECT_INGRESS:
	case FLOW_ACTION_REDIRECT:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, *pkt_host,
					       extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_MIRRED_INGRESS:
	case FLOW_ACTION_MIRRED:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, *pkt_host,
					       extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_VLAN_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
			return -EOPNOTSUPP;
		}

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
		break;
	case FLOW_ACTION_VLAN_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
			return -EOPNOTSUPP;
		}

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, act);
		*a_len += sizeof(struct nfp_fl_push_vlan);
		break;
	case FLOW_ACTION_TUNNEL_ENCAP: {
		const struct ip_tunnel_info *ip_tun = act->tunnel;

		*tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
		if (*tun_type == NFP_FL_TUNNEL_NONE) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
			return -EOPNOTSUPP;
		}

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
			return -EOPNOTSUPP;
		}

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
			return -EOPNOTSUPP;
		}

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
				     netdev, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_tun);
		}
		break;
	case FLOW_ACTION_TUNNEL_DECAP:
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	case FLOW_ACTION_MANGLE:
		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated, set_act, extack))
			return -EOPNOTSUPP;
		break;
	case FLOW_ACTION_CSUM:
		/* csum action requests recalc of something we have not fixed */
		if (act->csum_flags & ~*csum_updated) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
			return -EOPNOTSUPP;
		}
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list. Which will later be used to check support.
		 */
		*csum_updated &= ~act->csum_flags;
		break;
	case FLOW_ACTION_MPLS_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
			return -EOPNOTSUPP;
		}

		psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		err = nfp_fl_push_mpls(psh_m, act, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_push_mpls);
		break;
	case FLOW_ACTION_MPLS_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
			return -EOPNOTSUPP;
		}

		pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_pop_mpls(pop_m, act);
		*a_len += sizeof(struct nfp_fl_pop_mpls);
		break;
	case FLOW_ACTION_MPLS_MANGLE:
		if (*a_len +
		    sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
			return -EOPNOTSUPP;
		}

		set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_set_mpls(set_m, act);
		*a_len += sizeof(struct nfp_fl_set_mpls);
		break;
	case FLOW_ACTION_PTYPE:
		/* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
		if (act->ptype != PACKET_HOST)
			return -EOPNOTSUPP;

		*pkt_host = true;
		break;
	default:
		/* Currently we do not handle any other actions. */
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
				      int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry prev_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	if (current_act_idx == 0)
		return true;

	prev_act = flow_act->entries[current_act_idx - 1];

	return prev_act.id != FLOW_ACTION_MANGLE;
}
static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
				    int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry next_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	if (current_act_idx == flow_act->num_entries)
		return true;

	next_act = flow_act->entries[current_act_idx + 1];

	return next_act.id != FLOW_ACTION_MANGLE;
}
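/* Entry point for action compilation: walk the TC action list, batching
 * consecutive pedit actions between nfp_fl_check_mangle_start() and
 * nfp_fl_check_mangle_end(), and record the final action length.
 */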
int nfp_flower_compile_action(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow,
			      struct netlink_ext_ack *extack)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	struct nfp_flower_pedit_acts set_act;
	enum nfp_flower_tun_type tun_type;
	struct flow_action_entry *act;
	bool pkt_host = false;
	u32 csum_updated = 0;

	if (!flow_action_hw_stats_check(&flow->rule->action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))
		return -EOPNOTSUPP;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
			memset(&set_act, 0, sizeof(set_act));
		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated,
					     &set_act, &pkt_host, extack, i);
		if (err)
			return err;
		act_cnt++;
		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
			nfp_fl_commit_mangle(flow,
					     &nfp_flow->action_data[act_len],
					     &act_len, &set_act, &csum_updated);
	}

	/* We optimise when the action list is small, this can unfortunately
	 * not happen once we have more than one action in the action list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}
;