/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

#define UPLINK_VPORT 0xFFFF
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
	PROMISC_CHANGE = BIT(3),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE | \
			    PROMISC_CHANGE)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
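
/* Note on the event model (a summary inferred from this file, not upstream
 * documentation): the MODIFY_NIC_VPORT_CONTEXT command above arms firmware
 * to deliver a single vport-context-change event for the selected event
 * types.  Firmware disarms after firing, so the handler must re-arm when it
 * finishes processing; esw_vport_change_handle_locked() below does exactly
 * that by calling arm_vport_context_events_cmd() again with
 * vport->enabled_events.
 */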
/* E-Switch vport context HW commands */
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}
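
/* Worked example (illustrative, not from the original source): a VST setup
 * of vlan=100, qos=3 with both SET_VLAN_STRIP and SET_VLAN_INSERT programs
 * an esw vport context with cvlan_id=100 and cvlan_pcp=3; roughly, the tag
 * is inserted into untagged traffic coming from the vport and stripped from
 * traffic going towards it, so the VF itself never sees the VLAN.
 */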
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}
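
/* The helpers below wire the three FDB match flavors onto
 * __esw_fdb_set_vport_rule():
 *   - esw_fdb_set_vport_rule():          exact DMAC match (mac_c all-ones),
 *   - esw_fdb_set_vport_allmulti_rule(): match only the multicast bit
 *                                        (mac_c[0] = mac_v[0] = 0x01),
 *   - esw_fdb_set_vport_promisc_rule():  no DMAC match; rx_rule=true adds
 *                                        the misc source_port == UPLINK_VPORT
 *                                        match instead.
 */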
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	ft_attr.max_fte = table_size;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules*/
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
			esw->fdb_table.legacy.allmulti_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
			esw->fdb_table.legacy.addr_grp = NULL;
		}
		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
			mlx5_destroy_flow_table(esw->fdb_table.fdb);
			esw->fdb_table.fdb = NULL;
		}
	}

	kvfree(flow_group_in);
	return err;
}
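
/* Resulting legacy FDB layout (derived from the group indexes above):
 *   [0 .. table_size-3]  addr_grp:     exact UC/MC DMAC forwarding entries
 *   [table_size-2]       allmulti_grp: one catch-all multicast entry
 *   [table_size-1]       promisc_grp:  one source_port=uplink catch-all entry
 * Entries at lower flow indexes take precedence, so exact DMAC matches win
 * over the allmulti/promisc catch-alls.
 */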
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport)
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for PFs,
	 * it is already done by the PF netdev in mlx5e_execute_l2_action
	 */
	if (!vport || !vaddr->mpfs)
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	u32 vport_idx = 0;

	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
		struct mlx5_vport *vport = &esw->vports[vport_idx];
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_idx)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_idx);
				continue;
			}
			iter_vaddr->vport = vport_idx;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_idx);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}
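
/* Multicast bookkeeping summary (as implemented above): each distinct MC MAC
 * owns one esw_mc_addr with a single uplink rule shared by all vports, plus
 * one per-vport forwarding rule.  refcnt counts only vports that explicitly
 * requested the address; entries that exist merely because a vport is in
 * allmulti (mc_promisc) never touch refcnt, which is why both the add and
 * del paths test vaddr->mc_promisc before changing it.
 */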
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be an
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}
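
/* The function above is a mark-and-sweep reconciliation: every cached entry
 * is first marked MLX5_ACTION_DEL, then the firmware-provided list flips
 * survivors back to MLX5_ACTION_NONE and marks new addresses
 * MLX5_ACTION_ADD.  esw_apply_vport_addr_list() later executes the
 * remaining ADD/DEL marks against the L2 table and FDB.
 */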
/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
								     vport_num);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}
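
/* Note the idiom used twice above: IS_ERR_OR_NULL(rule) is true exactly when
 * no rule is installed, so "IS_ERR_OR_NULL(rule) == desired" means a state
 * transition is needed (no rule but promisc requested, or rule present but
 * promisc cleared).  The != test skips the no-op cases, so hardware is only
 * touched on actual transitions.
 */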
/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport_num,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport_num, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
				(promisc_all || promisc_mc));
}
static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
	}

	if (vport->enabled_events & PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport->vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport->vport);
	}

	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	arm_vport_context_events_cmd(dev, vport->vport,
				     vport->enabled_events);
}
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}
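
/* Concurrency sketch (inferred from the code, not a spec): firmware NIC
 * vport change events are queued onto esw->work_queue by
 * mlx5_eswitch_vport_event(), and every handler invocation serializes on
 * esw->state_lock with the administrative paths (mlx5_eswitch_set_vport_*),
 * so vport state is only ever mutated under that mutex.
 */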
static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	/* The egress acl table contains 2 rules:
	 * 1)Allow traffic with vlan_tag=vst_vlan_id
	 * 2)Drop all other traffic.
	 */
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->egress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						    vport->vport);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
	return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rules(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rules(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}
static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	/* The ingress acl table contains 4 groups
	 * (2 active rules at the same time -
	 *      1 allow rule from one of the first 3 groups.
	 *      1 drop rule from the last group):
	 * 1)Allow untagged traffic with smac=original mac.
	 * 2)Allow untagged traffic.
	 * 3)Allow traffic with smac=original mac.
	 * 4)Drop all other traffic.
	 */
	int table_size = 4;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	if (!IS_ERR_OR_NULL(vport->ingress.acl))
		return 0;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						    vport->vport);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.acl = acl;

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_spoofchk_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_untagged_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.allow_spoofchk_only_grp = g;

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	vport->ingress.drop_grp = g;

out:
	if (err) {
		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_spoofchk_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_only_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
			mlx5_destroy_flow_group(
					vport->ingress.allow_untagged_spoofchk_grp);
		if (!IS_ERR_OR_NULL(vport->ingress.acl))
			mlx5_destroy_flow_table(vport->ingress.acl);

		vport->ingress.acl = NULL;
		vport->ingress.drop_grp = NULL;
		vport->ingress.allow_spoofchk_only_grp = NULL;
		vport->ingress.allow_untagged_only_grp = NULL;
		vport->ingress.allow_untagged_spoofchk_grp = NULL;
	}

	kvfree(flow_group_in);
	return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
		mlx5_del_flow_rules(vport->ingress.drop_rule);

	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
		mlx5_del_flow_rules(vport->ingress.allow_rule);

	vport->ingress.drop_rule = NULL;
	vport->ingress.allow_rule = NULL;
}
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
	vport->ingress.allow_spoofchk_only_grp = NULL;
	vport->ingress.allow_untagged_only_grp = NULL;
	vport->ingress.allow_untagged_spoofchk_grp = NULL;
}
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->ingress.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int dest_num = 0;
	int err = 0;
	u8 *smac_v;

	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
		mlx5_core_warn(esw->dev,
			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
			       vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
		esw_vport_disable_ingress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	if (vport->info.vlan || vport->info.qos)
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);

	if (vport->info.spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      spec->match_value,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, vport->info.mac);
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	memset(spec, 0, sizeof(*spec));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter = counter;
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->ingress.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress drop rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.drop_rule = NULL;
		goto out;
	}

out:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	kvfree(spec);
	return err;
}
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_fc *counter = vport->egress.drop_counter;
	struct mlx5_flow_destination drop_ctr_dst = {0};
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int dest_num = 0;
	int err = 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->info.vlan && !vport->info.qos) {
		esw_vport_disable_egress_acl(esw, vport);
		return 0;
	}

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable egress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->info.vlan, vport->info.qos);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	/* Drop others rule (star rule) */
	memset(spec, 0, sizeof(*spec));
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	/* Attach egress drop flow counter */
	if (counter) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		drop_ctr_dst.counter = counter;
		dst = &drop_ctr_dst;
		dest_num++;
	}
	vport->egress.drop_rule =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, dst, dest_num);
	if (IS_ERR(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure egress drop rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kvfree(spec);
	return err;
}
/* Vport QoS management */
static int esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (esw->qos.enabled)
		return -EEXIST;

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
		return err;
	}

	esw->qos.enabled = true;
	return 0;
}
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
{
	int err;

	if (!esw->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_id);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);

	esw->qos.enabled = false;
}
static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
				u32 initial_max_rate, u32 initial_bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	int err = 0;

	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
	    !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (vport->qos.enabled)
		return -EEXIST;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 initial_max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vport->qos.enabled = true;
	return 0;
}
static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	int err;

	if (!vport->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);

	vport->qos.enabled = false;
}
static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
				u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 bitmask = 0;
	int err = 0;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!vport->qos.enabled)
		return -EIO;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	err = mlx5_modify_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 vport->qos.esw_tsar_ix,
						 bitmask);
	if (err) {
		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport_num, err);
		return err;
	}

	return 0;
}
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}
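
/* Example (hypothetical MAC): for mac 00:11:22:33:44:55 the GUID bytes, read
 * from index 7 down to 0, become 00 11 22 ff fe 33 44 55 - i.e. 0xff,0xfe is
 * stuffed between the OUI and the NIC-specific half, the standard
 * MAC-to-EUI-64 style expansion (minus the universal/local bit flip).
 */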
static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
				 struct mlx5_vport *vport)
{
	int vport_num = vport->vport;

	if (!vport_num)
		return;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      vport->info.link_state);
	mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
	mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
			       (vport->info.vlan || vport->info.qos));

	/* Only legacy mode needs ACLs */
	if (esw->mode == SRIOV_LEGACY) {
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}
}
static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
		vport->ingress.drop_counter = mlx5_fc_create(dev, false);
		if (IS_ERR(vport->ingress.drop_counter)) {
			esw_warn(dev,
				 "vport[%d] configure ingress drop rule counter failed\n",
				 vport->vport);
			vport->ingress.drop_counter = NULL;
		}
	}

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
		vport->egress.drop_counter = mlx5_fc_create(dev, false);
		if (IS_ERR(vport->egress.drop_counter)) {
			esw_warn(dev,
				 "vport[%d] configure egress drop rule counter failed\n",
				 vport->vport);
			vport->egress.drop_counter = NULL;
		}
	}
}
static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;

	if (vport->ingress.drop_counter)
		mlx5_fc_destroy(dev, vport->ingress.drop_counter);
	if (vport->egress.drop_counter)
		mlx5_fc_destroy(dev, vport->egress.drop_counter);
}
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	/* Create steering drop counters for ingress and egress ACLs */
	if (vport_num && esw->mode == SRIOV_LEGACY)
		esw_vport_create_drop_counters(vport);

	/* Restore old vport configuration */
	esw_apply_vport_conf(esw, vport);

	/* Attach vport to the eswitch rate limiter */
	if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
				 vport->qos.bw_share))
		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	vport->enabled = true;

	/* only PF is trusted by default */
	if (!vport_num)
		vport->info.trusted = true;

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&esw->state_lock);
}
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (!vport->enabled)
		return;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC));
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	mutex_lock(&esw->state_lock);
	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_disable_qos(esw, vport_num);
	if (vport_num && esw->mode == SRIOV_LEGACY) {
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					      vport_num,
					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
		esw_vport_destroy_drop_counters(vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&esw->state_lock);
}
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))

int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
	int err;
	int i, enabled_events;

	if (!ESW_ALLOWED(esw))
		return 0;

	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
	esw->mode = mode;

	if (mode == SRIOV_LEGACY)
		err = esw_create_legacy_fdb_table(esw, nvfs + 1);
	else
		err = esw_offloads_init(esw, nvfs + 1);
	if (err)
		goto abort;

	err = esw_create_tsar(esw);
	if (err)
		esw_warn(esw->dev, "Failed to create eswitch TSAR");

	/* Don't enable vport events when in SRIOV_OFFLOADS mode, since:
	 * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode
	 * 2. FDB/Eswitch is programmed by user space tools
	 */
	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
	for (i = 0; i <= nvfs; i++)
		esw_enable_vport(esw, i, enabled_events);

	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
		 esw->enabled_vports);
	return 0;

abort:
	esw->mode = SRIOV_NONE;
	return err;
}
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;
	int nvports;
	int i;

	if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
		 esw->enabled_vports, esw->mode);

	mc_promisc = &esw->mc_promisc;
	nvports = esw->enabled_vports;

	for (i = 0; i < esw->total_vports; i++)
		esw_disable_vport(esw, i);

	if (mc_promisc && mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_tsar(esw);

	if (esw->mode == SRIOV_LEGACY)
		esw_destroy_legacy_fdb_table(esw);
	else if (esw->mode == SRIOV_OFFLOADS)
		esw_offloads_cleanup(esw, nvports);

	esw->mode = SRIOV_NONE;
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	int total_vports = MLX5_TOTAL_VPORTS(dev);
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	err = esw_offloads_init_reps(esw);
	if (err)
		goto abort;

	hash_init(esw->offloads.encap_tbl);
	hash_init(esw->offloads.mod_hdr_tbl);
	mutex_init(&esw->state_lock);

	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;
	esw->mode = SRIOV_NONE;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	dev->priv.eswitch = esw;
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	kfree(esw->vports);
	kfree(esw);
	return err;
}
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	kfree(esw->vports);
	kfree(esw);
}
/* Vport Events */
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
	struct mlx5_vport *vport;

	if (!esw) {
		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
			vport_num);
		return;
	}

	vport = &esw->vports[vport_num];
	if (vport->enabled)
		queue_work(esw->work_queue, &vport->vport_change_handler);
}
/* Vport Administration */
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	struct mlx5_vport *evport;
	u64 node_guid;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
		mlx5_core_warn(esw->dev,
			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
			       vport);
		err = -EPERM;
		goto unlock;
	}

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		goto unlock;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport, err);
	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == SRIOV_LEGACY)
		err = esw_vport_ingress_config(esw, evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 int vport, int link_state)
{
	struct mlx5_vport *evport;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	err = mlx5_modify_vport_admin_state(esw->dev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					    vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d link state, err = %d",
			       vport, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  int vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	ivi->min_tx_rate = evport->info.min_rate;
	ivi->max_tx_rate = evport->info.max_rate;
	mutex_unlock(&esw->state_lock);

	return 0;
}
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  int vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		goto unlock;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
		err = esw_vport_ingress_config(esw, evport);
		if (err)
			goto unlock;
		err = esw_vport_egress_config(esw, evport);
	}

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
}
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    int vport, bool spoofchk)
{
	struct mlx5_vport *evport;
	bool pschk;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (evport->enabled && esw->mode == SRIOV_LEGACY)
		err = esw_vport_ingress_config(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;
	mutex_unlock(&esw->state_lock);

	return err;
}
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 int vport, bool setting)
{
	struct mlx5_vport *evport;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);
	mutex_unlock(&esw->state_lock);

	return 0;
}
static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 max_guarantee = 0;
	int i;

	for (i = 0; i <= esw->total_vports; i++) {
		evport = &esw->vports[i];
		if (!evport->enabled || evport->info.min_rate < max_guarantee)
			continue;
		max_guarantee = evport->info.min_rate;
	}

	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
}
static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 vport_max_rate;
	u32 vport_min_rate;
	u32 bw_share;
	int err;
	int i;

	for (i = 0; i <= esw->total_vports; i++) {
		evport = &esw->vports[i];
		if (!evport->enabled)
			continue;
		vport_min_rate = evport->info.min_rate;
		vport_max_rate = evport->info.max_rate;
		bw_share = MLX5_MIN_BW_SHARE;

		if (vport_min_rate)
			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
							 divider,
							 fw_max_bw_share);

		if (bw_share == evport->qos.bw_share)
			continue;

		err = esw_vport_qos_config(esw, i, vport_max_rate,
					   bw_share);
		if (!err)
			evport->qos.bw_share = bw_share;
		else
			return err;
	}

	return 0;
}
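
/* Worked example of the min-rate normalization (hypothetical numbers,
 * assuming the usual MLX5_RATE_TO_BW_SHARE definition of rate / divider
 * clamped between MLX5_MIN_BW_SHARE and fw_max_bw_share): with
 * fw_max_bw_share = 100 and guarantees of 2000 and 1000, the divider is
 * max(2000 / 100, 1) = 20, giving bw_share values of 100 and 50.  Shares
 * are thus scaled so the largest guarantee spans the full firmware range,
 * and anything below MLX5_MIN_BW_SHARE is rounded up to it.
 */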
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
				u32 max_rate, u32 min_rate)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
					fw_max_bw_share >= MLX5_MIN_BW_SHARE;
	bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
	struct mlx5_vport *evport;
	u32 previous_min_rate;
	u32 divider;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;
	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
		return -EOPNOTSUPP;

	mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];

	if (min_rate == evport->info.min_rate)
		goto set_max_rate;

	previous_min_rate = evport->info.min_rate;
	evport->info.min_rate = min_rate;
	divider = calculate_vports_min_rate_divider(esw);
	err = normalize_vports_min_rate(esw, divider);
	if (err) {
		evport->info.min_rate = previous_min_rate;
		goto unlock;
	}

set_max_rate:
	if (max_rate == evport->info.max_rate)
		goto unlock;

	err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
	if (!err)
		evport->info.max_rate = max_rate;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
						int vport_idx,
						struct mlx5_vport_drop_stats *stats)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_vport *vport = &esw->vports[vport_idx];
	u64 bytes = 0;
	u16 idx = 0;

	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
		return;

	if (vport->egress.drop_counter) {
		idx = vport->egress.drop_counter->id;
		mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
	}

	if (vport->ingress.drop_counter) {
		idx = vport->ingress.drop_counter->id;
		mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
	}
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 int vport,
				 struct ifla_vf_stats *vf_stats)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_vport_drop_stats stats = {0};
	int err = 0;
	u32 *out;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	memset(out, 0, outlen);
	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}