// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/tcp.h>
#include <net/tc_act/tc_gate.h>

#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "vcap_tc.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"

#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */
/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;
	u8 value;
	u8 mask;
	enum vcap_keyfield_set keyset;
};

struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
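/* A sparx5_multiple_rules instance is indexed by rule size (subwords per
 * entry), so a filter created with protocol "all" can collect one wildcarded
 * keyset choice per size here and have the extra rule copies added
 * afterwards by sparx5_tc_add_remaining_rules().
 */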
struct sparx5_tc_flower_template {
	struct list_head list; /* for insertion in the list of templates */
	int cid; /* chain id */
	enum vcap_keyfield_set orig; /* keyset used before the template */
	enum vcap_keyfield_set keyset; /* new keyset used by template */
	u16 l3_proto; /* protocol specified in the template */
};
/* SparX-5 VCAP fragment types:
 * 0 = no fragment, 1 = initial fragment,
 * 2 = suspicious fragment, 3 = valid follow-up fragment
 */
enum {                     /* key / mask */
	FRAG_NOT   = 0x03, /*  0 / 3   */
	FRAG_SOME  = 0x11, /*  1 / 1   */
	FRAG_FIRST = 0x13, /*  1 / 3   */
	FRAG_LATER = 0x33, /*  3 / 3   */
	FRAG_INVAL = 0xff, /* invalid  */
};
/* Flower fragment flag to VCAP fragment type mapping */
static const u8 sparx5_vcap_frag_map[4][4] = {              /* is_frag */
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
	{ FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
	{ FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST }  /* 1/1 */
	/* 0/0        0/1         1/0         1/1 <-- first_frag */
};
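/* Worked example (illustration only): a flower match on "ip_flags frag"
 * sets the is_frag key/mask to 1/1 and leaves first_frag at 0/0, giving
 * is_frag_idx = 3 and first_frag_idx = 0, so the table yields FRAG_SOME
 * (0x11): L3_FRAGMENT_TYPE value 1 with mask 1, i.e. "any fragment".
 */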
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
	int err = 0;

	switch (st->tpid) {
	case ETH_P_8021Q:
		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_8021Q_TPID,
					    SPX5_TPID_SEL_8100, ~0);
		break;
	case ETH_P_8021AD:
		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_8021Q_TPID,
					    SPX5_TPID_SEL_88A8, ~0);
		break;
	default:
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
	}

	return err;
}
static int
sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_IP_SNAP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}
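/* Illustration (not from the driver sources): a filter matching
 * "protocol ip ... ip_proto udp" arrives here with n_proto == ETH_P_IP and
 * ip_proto == IPPROTO_UDP, so the handler adds IP4_IS = 1 and TCP_IS = 0
 * (plus TCP_UDP_IS = 1 when the target VCAP is IS0).
 */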
static int
sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct netlink_ext_ack *extack = st->fco->common.extack;
	struct flow_match_control mt;
	u32 value, mask;
	int err = 0;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags & (FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
		u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
		u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
		u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;

		u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
		u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
		u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;

		/* Lookup verdict based on the 2 + 2 input bits */
		u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];

		if (vdt == FRAG_INVAL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Match on invalid fragment flag combination");
			return -EINVAL;
		}

		/* Extract VCAP fragment key and mask from verdict */
		value = (vdt >> 4) & 0x3;
		mask = vdt & 0x3;

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,
					    value, mask);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error");
			return err;
		}
	}

	if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
					     FLOW_DIS_FIRST_FRAG,
					     mt.mask->flags, extack))
		return -EOPNOTSUPP;

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);

	return err;
}
static int
sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	if (st->admin->vtype != VCAP_TYPE_IS0) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "cvlan not supported in this VCAP");
		return -EINVAL;
	}

	return vcap_tc_flower_handler_cvlan_usage(st);
}
static int
sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
	int err;

	if (st->admin->vtype == VCAP_TYPE_IS0) {
		vid_key = VCAP_KF_8021Q_VID0;
		pcp_key = VCAP_KF_8021Q_PCP0;
	}

	err = vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
	if (err)
		return err;

	if (st->admin->vtype == VCAP_TYPE_ES0 && st->tpid)
		err = sparx5_tc_flower_es0_tpid(st);

	return err;
}
static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};
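/* Each handler above translates one flower dissector key into VCAP key
 * fields. sparx5_tc_use_dissectors() below walks this table for every key
 * used by the classifier and rejects the filter if a used key has no
 * handler.
 */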
static int sparx5_tc_use_dissectors(struct vcap_tc_flower_parse_usage *st,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule)
{
	int idx, err = 0;

	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(st->frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](st);
		if (err)
			return err;
	}

	if (st->frule->match.dissector->used_keys ^ st->used_keys) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	return err;
}
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco,
					 bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
				       ingress)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_POP)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan push and pop action");
		return -EOPNOTSUPP;
	}

	if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan push and modify action");
		return -EOPNOTSUPP;
	}

	if (action_mask & BIT(FLOW_ACTION_VLAN_POP) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan pop and modify action");
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Add a rule counter action */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
				      struct vcap_rule *vrule)
{
	int err;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		break;
	case VCAP_TYPE_ES0:
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
		break;
	case VCAP_TYPE_IS2:
	case VCAP_TYPE_ES2:
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
		break;
	default:
		pr_err("%s:%d: vcap type: %d not supported\n",
		       __func__, __LINE__, admin->vtype);
		break;
	}

	return 0;
}
/* Collect all port keysets and apply the first of them, possibly wildcarded */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (!vcap_rule_find_keysets(vrule, &matches))
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}

	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);

		/* Some keysets do not have a type field */
		vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
				      mru->value,
				      ~mru->mask);

		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}

	return err;
}
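/* Illustration of the type wildcarding above (values assumed for the
 * example): if two usable keysets of the same size have type ids 4 (0b100)
 * and 6 (0b110), the combined value is 4 & 6 = 0b100 and the accumulated
 * mask is 4 | 6 = 0b110; the XOR leaves 0b010, so VCAP_KF_TYPE is written
 * with value 0b100 and mask ~0b010, making bit 1 a don't-care that matches
 * both keysets.
 */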
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}

	err = vcap_add_rule(vrule);
	if (err)
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);

out:
	vcap_free_rule(vrule);
	return err;
}
static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
					 struct flow_cls_offload *fco,
					 struct vcap_rule *erule,
					 struct vcap_admin *admin,
					 struct sparx5_multiple_rules *multi)
{
	int idx, err = 0;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		if (!multi->rule[idx].selected)
			continue;

		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
					      &multi->rule[idx]);
		if (err)
			break;
	}

	return err;
}
573 static int sparx5_tc_set_actionset(struct vcap_admin
*admin
,
574 struct vcap_rule
*vrule
)
576 enum vcap_actionfield_set aset
;
579 switch (admin
->vtype
) {
581 aset
= VCAP_AFS_CLASSIFICATION
;
584 aset
= VCAP_AFS_BASE_TYPE
;
590 aset
= VCAP_AFS_BASE_TYPE
;
593 pr_err("%s:%d: %s\n", __func__
, __LINE__
, "Invalid VCAP type");
596 /* Do not overwrite any current actionset */
597 if (vrule
->actionset
== VCAP_AFS_NO_VALUE
)
598 err
= vcap_set_rule_set_actionset(vrule
, aset
);
/* Add the VCAP key to match on for a rule target value */
static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
					  struct vcap_rule *vrule,
					  int target_cid)
{
	int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
	int err;

	if (!link_val)
		return 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		/* Add NXT_IDX key for chaining rules between IS0 instances */
		err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
					    1, /* enable */
					    ~0);
		if (err)
			return err;
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_IS2:
		/* Add PAG key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
					     link_val, /* target */
					     ~0);
	case VCAP_TYPE_ES0:
	case VCAP_TYPE_ES2:
		/* Add ISDX key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
					     ~0);
	default:
		break;
	}
	return 0;
}
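/* Chaining is done in two halves: the rule in the source VCAP adds one of
 * the G_IDX/PAG/ISDX actions (see sparx5_tc_add_rule_link() below) carrying
 * the destination chain offset, and the rule created in the destination
 * chain matches on that same value via the keys added above.
 */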
/* Add the VCAP action that adds a target value to a rule */
static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
				   struct vcap_admin *admin,
				   struct vcap_rule *vrule,
				   int from_cid, int to_cid)
{
	struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
	int diff, err = 0;

	if (!to_admin) {
		pr_err("%s:%d: unsupported chain direction: %d\n",
		       __func__, __LINE__, to_cid);
		return -EINVAL;
	}

	diff = vcap_chain_offset(vctrl, from_cid, to_cid);
	if (!diff)
		return 0;

	if (admin->vtype == VCAP_TYPE_IS0 &&
	    to_admin->vtype == VCAP_TYPE_IS0) {
		/* Between IS0 instances the G_IDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
					       1); /* Replace */
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_IS2) {
		/* Between IS0 and IS2 the PAG value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PAG_OVERRIDE_MASK,
					       0xff);
		if (err)
			goto out;
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   (to_admin->vtype == VCAP_TYPE_ES0 ||
		    to_admin->vtype == VCAP_TYPE_ES2)) {
		/* Between IS0 and ES0/ES2 the ISDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
					       diff);
		if (err)
			goto out;
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_ISDX_ADD_REPLACE_SEL,
					       VCAP_BIT_1);
		if (err)
			goto out;
	} else {
		pr_err("%s:%d: unsupported chain destination: %d\n",
		       __func__, __LINE__, to_cid);
		err = -EOPNOTSUPP;
	}
out:
	return err;
}
static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
					   struct flow_action_entry *act,
					   struct netlink_ext_ack *extack)
{
	int i;

	if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
		return -EINVAL;
	}

	if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
	    act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
		return -EINVAL;
	}

	if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
		return -EINVAL;
	}

	if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
		return -EINVAL;
	}

	sg->gate_state = true;
	sg->ipv = act->gate.prio;
	sg->num_entries = act->gate.num_entries;
	sg->cycletime = act->gate.cycletime;
	sg->cycletimeext = act->gate.cycletimeext;

	for (i = 0; i < sg->num_entries; i++) {
		sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
		sg->gce[i].interval = act->gate.entries[i].interval;
		sg->gce[i].ipv = act->gate.entries[i].ipv;
		sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
	}

	return 0;
}
static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
					     struct flow_action_entry *act,
					     struct netlink_ext_ack *extack)
{
	pol->type = SPX5_POL_SERVICE;
	pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
	pol->burst = act->police.burst;
	pol->idx = act->hw_index;

	/* rate is now in kbit */
	if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
		NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
		return -EINVAL;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	return 0;
}
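/* Example of the rate conversion above: tc expresses the police rate in
 * bytes per second, so a 100 Mbit/s filter arrives as
 * rate_bytes_ps == 12500000 and div_u64(12500000, 1000) * 8 == 100000,
 * i.e. the kbit unit used by the service policer.
 */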
static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
				       struct vcap_rule *vrule, int sg_idx,
				       int pol_idx, struct sparx5_psfp_sg *sg,
				       struct sparx5_psfp_fm *fm,
				       struct sparx5_psfp_sf *sf)
{
	u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
	int ret;

	/* Must always have a stream gate - max sdu (filter option) is evaluated
	 * after frames have passed the gate, so in case of only a policer, we
	 * allocate a stream gate that is always open.
	 */
	if (sg_idx < 0) {
		/* Always-open stream gate is always the last */
		sg_idx = sparx5_pool_idx_to_id(sparx5->data->consts->n_gates -
					       1);
		sg->ipv = 0; /* Disabled */
		sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->gate_state = 1; /* Open */
		sg->gate_enabled = 1;
		sg->gce[0].gate_state = 1;
		sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->gce[0].maxoctets = 0; /* Disabled */
	}

	ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
	if (ret < 0)
		return ret;

	if (pol_idx >= 0) {
		/* Add new flow-meter */
		ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
		if (ret < 0)
			return ret;
	}

	/* Map stream filter to stream gate */
	sf->sgid = psfp_sgid;

	/* Add new stream-filter and map it to a steam gate */
	ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
	if (ret < 0)
		return ret;

	/* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
	sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);

	ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
				       VCAP_BIT_1);
	if (ret)
		return ret;

	ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
	if (ret)
		return ret;

	return 0;
}
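/* The PSFP resources above are created in dependency order: stream gate
 * first, then the optional flow meter, then the stream filter that points
 * at the gate, and finally the ISDX mapping plus the VCAP actions that make
 * matching frames carry that ISDX.
 */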
/* Handle the action trap for a VCAP rule */
static int sparx5_tc_action_trap(struct vcap_admin *admin,
				 struct vcap_rule *vrule,
				 struct flow_cls_offload *fco)
{
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS2:
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_CPU_COPY_ENA,
					       VCAP_BIT_1);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_CPU_QUEUE_NUM, 0);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_MASK_MODE,
					       SPX5_PMM_REPLACE_ALL);
		break;
	case VCAP_TYPE_ES0:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_FWD_SEL,
					       SPX5_FWSEL_REDIRECT_TO_LOOPBACK);
		break;
	case VCAP_TYPE_ES2:
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_CPU_COPY_ENA,
					       VCAP_BIT_1);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_CPU_QUEUE_NUM, 0);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Trap action not supported in this VCAP");
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int sparx5_tc_action_vlan_pop(struct vcap_admin *admin,
				     struct vcap_rule *vrule,
				     struct flow_cls_offload *fco,
				     u16 tpid)
{
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_ES0:
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN pop action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	switch (tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_OUTER_TAG,
					       SPX5_OTAG_UNTAG);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
	}

	return err;
}
static int sparx5_tc_action_vlan_modify(struct vcap_admin *admin,
					struct vcap_rule *vrule,
					struct flow_cls_offload *fco,
					struct flow_action_entry *act,
					u16 tpid)
{
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_ES0:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_OUTER_TAG,
					       SPX5_OTAG_TAG_A);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN modify action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	switch (tpid) {
	case ETH_P_8021Q:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_A_TPID_SEL,
					       SPX5_TPID_A_8100);
		break;
	case ETH_P_8021AD:
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_A_TPID_SEL,
					       SPX5_TPID_A_88A8);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
	}
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_TAG_A_VID_SEL,
				       SPX5_VID_A_VAL);
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_VID_A_VAL,
				       act->vlan.vid);
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_TAG_A_PCP_SEL,
				       SPX5_PCP_A_VAL);
	if (err)
		return err;

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_PCP_A_VAL,
				       act->vlan.prio);
	if (err)
		return err;

	return vcap_rule_add_action_u32(vrule,
					VCAP_AF_TAG_A_DEI_SEL,
					SPX5_DEI_A_CLASSIFIED);
}
static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
				      struct vcap_rule *vrule,
				      struct flow_cls_offload *fco,
				      struct flow_action_entry *act,
				      u16 tpid)
{
	u16 act_tpid = be16_to_cpu(act->vlan.proto);
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_ES0:
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN push action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	if (tpid == ETH_P_8021AD) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot push on double tagged frames");
		return -EOPNOTSUPP;
	}

	err = sparx5_tc_action_vlan_modify(admin, vrule, fco, act, act_tpid);
	if (err)
		return err;

	switch (act_tpid) {
	case ETH_P_8021Q:
		break;
	case ETH_P_8021AD:
		/* Push classified tag as inner tag */
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_INNER_TAG,
					       SPX5_ITAG_PUSH_B_TAG);
		if (err)
			break;
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_B_TPID_SEL,
					       SPX5_TPID_B_CLASSIFIED);
		break;
	default:
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");
		err = -EINVAL;
	}

	return err;
}
static void sparx5_tc_flower_set_port_mask(struct vcap_u72_action *ports,
					   struct net_device *ndev)
{
	struct sparx5_port *port = netdev_priv(ndev);
	int byidx = port->portno / BITS_PER_BYTE;
	int biidx = port->portno % BITS_PER_BYTE;

	ports->value[byidx] |= BIT(biidx);
}
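/* Example: egress to port 11 sets value[1] bit 3 of the 72-bit port mask,
 * since 11 / BITS_PER_BYTE == 1 and 11 % BITS_PER_BYTE == 3.
 */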
static int sparx5_tc_action_mirred(struct vcap_admin *admin,
				   struct vcap_rule *vrule,
				   struct flow_cls_offload *fco,
				   struct flow_action_entry *act)
{
	struct vcap_u72_action ports = {0};
	int err;

	if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Mirror action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
				       SPX5_PMM_OR_DSTMASK);
	if (err)
		return err;

	sparx5_tc_flower_set_port_mask(&ports, act->dev);

	return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
}
static int sparx5_tc_action_redirect(struct vcap_admin *admin,
				     struct vcap_rule *vrule,
				     struct flow_cls_offload *fco,
				     struct flow_action_entry *act)
{
	struct vcap_u72_action ports = {0};
	int err;

	if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Redirect action not supported in this VCAP");
		return -EOPNOTSUPP;
	}

	err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
				       SPX5_PMM_REPLACE_ALL);
	if (err)
		return err;

	sparx5_tc_flower_set_port_mask(&ports, act->dev);

	return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
}
/* Remove rule keys that may prevent templates from matching a keyset */
static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin,
					   struct vcap_rule *vrule,
					   u16 l3_proto)
{
	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		vcap_rule_rem_key(vrule, VCAP_KF_ETYPE);
		switch (l3_proto) {
		case ETH_P_IP:
			break;
		case ETH_P_IPV6:
			vcap_rule_rem_key(vrule, VCAP_KF_IP_SNAP_IS);
			break;
		default:
			break;
		}
		break;
	case VCAP_TYPE_ES2:
		switch (l3_proto) {
		case ETH_P_IP:
			if (vrule->keyset == VCAP_KFS_IP4_OTHER)
				vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
			break;
		case ETH_P_IPV6:
			if (vrule->keyset == VCAP_KFS_IP6_STD)
				vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
			vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
			break;
		default:
			break;
		}
		break;
	case VCAP_TYPE_IS2:
		switch (l3_proto) {
		case ETH_P_IP:
		case ETH_P_IPV6:
			vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
static bool sparx5_tc_flower_use_template(struct net_device *ndev,
					  struct flow_cls_offload *fco,
					  struct vcap_admin *admin,
					  struct vcap_rule *vrule)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_tc_flower_template *ftp;

	list_for_each_entry(ftp, &port->tc_templates, list) {
		if (ftp->cid != fco->common.chain_index)
			continue;

		vcap_set_rule_set_keyset(vrule, ftp->keyset);
		sparx5_tc_flower_simplify_rule(admin, vrule, ftp->l3_proto);
		return true;
	}
	return false;
}
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    bool ingress)
{
	struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
	struct netlink_ext_ack *extack = fco->common.extack;
	int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
	struct vcap_tc_flower_parse_usage state = {
		.fco = fco,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_multiple_rules multi = {};
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_psfp_sg sg = { 0 };
	struct sparx5_psfp_fm fm = { 0 };
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;

	vctrl = port->sparx5->vcap_ctrl;

	err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
	if (err)
		return err;

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	vrule->cookie = fco->cookie;

	state.vrule = vrule;
	state.frule = flow_cls_offload_flow_rule(fco);
	err = sparx5_tc_use_dissectors(&state, admin, vrule);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_link_target(admin, vrule,
					     fco->common.chain_index);
	if (err)
		goto out;

	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		switch (act->id) {
		case FLOW_ACTION_GATE: {
			err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
			if (err < 0)
				goto out;

			tc_sg_idx = act->hw_index;
			break;
		}
		case FLOW_ACTION_POLICE: {
			err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
								extack);
			if (err < 0)
				goto out;

			tc_pol_idx = fm.pol.idx;
			sf.max_sdu = act->police.mtu;
			break;
		}
		case FLOW_ACTION_TRAP:
			err = sparx5_tc_action_trap(admin, vrule, fco);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_MIRRED:
			err = sparx5_tc_action_mirred(admin, vrule, fco, act);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_REDIRECT:
			err = sparx5_tc_action_redirect(admin, vrule, fco, act);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_ACCEPT:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_GOTO:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			sparx5_tc_add_rule_link(vctrl, admin, vrule,
						fco->common.chain_index,
						act->chain_index);
			break;
		case FLOW_ACTION_VLAN_POP:
			err = sparx5_tc_action_vlan_pop(admin, vrule, fco,
							state.tpid);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_VLAN_PUSH:
			err = sparx5_tc_action_vlan_push(admin, vrule, fco,
							 act, state.tpid);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = sparx5_tc_action_vlan_modify(admin, vrule, fco,
							   act, state.tpid);
			if (err)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	/* Set up the PSFP stream gate/filter/meter when requested */
	if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
		if (!sparx5_has_feature(sparx5, SPX5_FEATURE_PSFP)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
						  tc_pol_idx, &sg, &fm, &sf);
		if (err)
			goto out;
	}

	if (!sparx5_tc_flower_use_template(ndev, fco, admin, vrule)) {
		err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
						       state.l3_proto, &multi);
		if (err) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "No matching port keyset for filter protocol and keys");
			goto out;
		}
	}

	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, state.l3_proto);
	if (err) {
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err)
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");

	if (state.l3_proto == ETH_P_ALL)
		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
						    &multi);

out:
	vcap_free_rule(vrule);
	return err;
}
static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
					  struct vcap_rule *vrule)
{
	struct vcap_client_actionfield *afield;
	u32 isdx, sfid, sgid, fmid;

	/* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
	 * it is used for stream and/or flow-meter classification.
	 */
	afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
	if (!afield)
		return;

	isdx = afield->data.u32.value;
	sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);
	if (!sfid)
		return;

	fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
	sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);

	if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
		pr_err("%s:%d Could not delete invalid fmid: %d", __func__,
		       __LINE__, fmid);

	if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
		pr_err("%s:%d Could not delete invalid sgid: %d", __func__,
		       __LINE__, sgid);

	if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
		pr_err("%s:%d Could not delete invalid sfid: %d", __func__,
		       __LINE__, sfid);

	sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
}
static int sparx5_tc_free_rule_resources(struct net_device *ndev,
					 struct vcap_control *vctrl,
					 int rule_id)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	struct vcap_rule *vrule;
	int ret = 0;

	vrule = vcap_get_rule(vctrl, rule_id);
	if (IS_ERR(vrule))
		return -EINVAL;

	sparx5_tc_free_psfp_resources(sparx5, vrule);

	vcap_free_rule(vrule);
	return ret;
}
static int sparx5_tc_flower_destroy(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	int err = -ENOENT, count = 0, rule_id;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;
	while (true) {
		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
		if (rule_id <= 0)
			break;
		if (count == 0) {
			/* Resources are attached to the first rule of
			 * a set of rules. Only works if the rules are
			 * in the correct order.
			 */
			err = sparx5_tc_free_rule_resources(ndev, vctrl,
							    rule_id);
			if (err)
				pr_err("%s:%d: could not free resources %d\n",
				       __func__, __LINE__, rule_id);
		}
		err = vcap_del_rule(vctrl, ndev, rule_id);
		if (err) {
			pr_err("%s:%d: could not delete rule %d\n",
			       __func__, __LINE__, rule_id);
			break;
		}
		++count;
	}
	return err;
}
static int sparx5_tc_flower_stats(struct net_device *ndev,
				  struct flow_cls_offload *fco,
				  struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_counter ctr = {};
	struct vcap_control *vctrl;
	ulong lastused = 0;
	int err;

	vctrl = port->sparx5->vcap_ctrl;
	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
	if (err)
		return err;

	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	return err;
}
static int sparx5_tc_flower_template_create(struct net_device *ndev,
					    struct flow_cls_offload *fco,
					    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_tc_flower_parse_usage state = {
		.fco = fco,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	struct sparx5_tc_flower_template *ftp;
	struct vcap_keyset_list kslist = {};
	enum vcap_keyfield_set keysets[10];
	struct vcap_control *vctrl;
	struct vcap_rule *vrule;
	int count, err;

	if (admin->vtype == VCAP_TYPE_ES0) {
		pr_err("%s:%d: %s\n", __func__, __LINE__,
		       "VCAP does not support templates");
		return -EINVAL;
	}

	count = vcap_admin_rule_count(admin, fco->common.chain_index);
	if (count > 0) {
		pr_err("%s:%d: %s\n", __func__, __LINE__,
		       "Filters are already present");
		return -EBUSY;
	}

	ftp = kzalloc(sizeof(*ftp), GFP_KERNEL);
	if (!ftp)
		return -ENOMEM;

	ftp->cid = fco->common.chain_index;
	ftp->orig = VCAP_KFS_NO_VALUE;
	ftp->keyset = VCAP_KFS_NO_VALUE;

	vctrl = port->sparx5->vcap_ctrl;
	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index,
				VCAP_USER_TC, fco->common.prio, 0);
	if (IS_ERR(vrule)) {
		err = PTR_ERR(vrule);
		goto err_rule;
	}

	state.vrule = vrule;
	state.frule = flow_cls_offload_flow_rule(fco);
	err = sparx5_tc_use_dissectors(&state, admin, vrule);
	if (err) {
		pr_err("%s:%d: key error: %d\n", __func__, __LINE__, err);
		goto out;
	}

	ftp->l3_proto = state.l3_proto;

	sparx5_tc_flower_simplify_rule(admin, vrule, state.l3_proto);

	/* Find the keysets that the rule can use */
	kslist.keysets = keysets;
	kslist.max = ARRAY_SIZE(keysets);
	if (!vcap_rule_find_keysets(vrule, &kslist)) {
		pr_err("%s:%d: %s\n", __func__, __LINE__,
		       "Could not find a suitable keyset");
		err = -ENOENT;
		goto out;
	}

	ftp->keyset = vcap_select_min_rule_keyset(vctrl, admin->vtype, &kslist);

	sparx5_vcap_set_port_keyset(ndev, admin, fco->common.chain_index,
				    state.l3_proto, ftp->keyset,
				    &kslist);

	ftp->orig = kslist.keysets[0];

	/* Store new template */
	list_add_tail(&ftp->list, &port->tc_templates);
	vcap_free_rule(vrule);
	return 0;

out:
	vcap_free_rule(vrule);
err_rule:
	kfree(ftp);
	return err;
}
static int sparx5_tc_flower_template_destroy(struct net_device *ndev,
					     struct flow_cls_offload *fco,
					     struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_tc_flower_template *ftp, *tmp;
	int err = -ENOENT;

	/* Rules using the template are removed by the tc framework */
	list_for_each_entry_safe(ftp, tmp, &port->tc_templates, list) {
		if (ftp->cid != fco->common.chain_index)
			continue;

		sparx5_vcap_set_port_keyset(ndev, admin,
					    fco->common.chain_index,
					    ftp->l3_proto, ftp->orig,
					    NULL);
		list_del(&ftp->list);
		kfree(ftp);
		break;
	}
	return err;
}
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
		     bool ingress)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	struct vcap_admin *admin;
	int err = -EINVAL;

	/* Get vcap instance from the chain id */
	vctrl = port->sparx5->vcap_ctrl;
	admin = vcap_find_admin(vctrl, fco->common.chain_index);
	if (!admin) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
		return err;
	}

	switch (fco->command) {
	case FLOW_CLS_REPLACE:
		return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
	case FLOW_CLS_DESTROY:
		return sparx5_tc_flower_destroy(ndev, fco, admin);
	case FLOW_CLS_STATS:
		return sparx5_tc_flower_stats(ndev, fco, admin);
	case FLOW_CLS_TMPLT_CREATE:
		return sparx5_tc_flower_template_create(ndev, fco, admin);
	case FLOW_CLS_TMPLT_DESTROY:
		return sparx5_tc_flower_template_destroy(ndev, fco, admin);
	default:
		return -EOPNOTSUPP;
	}
}