/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1530
#define MLXSW_FWREV_SUBMINOR 152
#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)

#define MLXSW_SP_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
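/* For reference, with the values above MLXSW_SP_FW_FILENAME expands to
 * "mellanox/mlxsw_spectrum-13.1530.152.mfa2", and
 * MLXSW_FWREV_MINOR_TO_BRANCH(1530) evaluates to 15, so any device firmware
 * whose minor revision falls in the 1500..1599 range is treated as being on
 * the driver's branch and is not re-flashed.
 */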
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
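/* Each MLXSW_ITEM32(tx, hdr, <field>, ...) above generates
 * mlxsw_tx_hdr_<field>_set()/_get() accessors, which are what
 * mlxsw_sp_txhdr_construct() below uses to build the header, e.g.:
 *
 *	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
 *
 * writes the 4-bit version field at bits 31:28 of the first header word.
 */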
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index,
					 u32 component_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
			   component_index, fwhandle, component_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query	= mlxsw_sp_component_query,
	.fsm_lock		= mlxsw_sp_fsm_lock,
	.fsm_component_update	= mlxsw_sp_fsm_component_update,
	.fsm_block_download	= mlxsw_sp_fsm_block_download,
	.fsm_component_verify	= mlxsw_sp_fsm_component_verify,
	.fsm_activate		= mlxsw_sp_fsm_activate,
	.fsm_query_state	= mlxsw_sp_fsm_query_state,
	.fsm_cancel		= mlxsw_sp_fsm_cancel,
	.fsm_release		= mlxsw_sp_fsm_release
};
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
				   const struct firmware *firmware)
{
	struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlxsw_sp_mlxfw_dev_ops,
			.psid = mlxsw_sp->bus_info->psid,
			.psid_size = strlen(mlxsw_sp->bus_info->psid),
		},
		.mlxsw_sp = mlxsw_sp
	};

	return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
}
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
	const struct firmware *firmware;
	int err;

	/* Validate driver & FW are compatible */
	if (rev->major != MLXSW_FWREV_MAJOR) {
		WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
		     rev->major, MLXSW_FWREV_MAJOR);
		return -EINVAL;
	}
	if (MLXSW_FWREV_MINOR_TO_BRANCH(rev->minor) ==
	    MLXSW_FWREV_MINOR_TO_BRANCH(MLXSW_FWREV_MINOR))
		return 0;

	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
		 rev->major, rev->minor, rev->subminor);
	dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
		 MLXSW_SP_FW_FILENAME);

	err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
				      mlxsw_sp->bus_info->dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
			MLXSW_SP_FW_FILENAME);
		return err;
	}

	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);
	return err;
}
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
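/* Typical flow counter lifecycle (illustrative sketch only, not part of the
 * driver):
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *	int err;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	if (err)
 *		return err;
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 *
 * The counter is cleared on allocation, so the first _get() reports only
 * traffic seen since the counter was allocated.
 */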
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}
struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
					      port->local_port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
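/* SPAN entries are reference counted. A balanced usage looks like this
 * (illustrative sketch only):
 *
 *	span_entry = mlxsw_sp_span_entry_get(to_port);	// ref_count++ or create
 *	...
 *	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);	// destroyed at zero
 *
 * mlxsw_sp_span_mirror_add()/_del() below follow exactly this pairing.
 */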
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
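/* Worked example (hypothetical cell size of 96 bytes): for mtu == 1518,
 * mtu * 5 / 2 == 3795 bytes, i.e. the buffer is provisioned for roughly two
 * and a half maximum-sized packets in flight; mlxsw_sp_bytes_cells() rounds
 * that up to 40 cells, and the trailing + 1 adds one spare cell.
 */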
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;

	return NULL;
}
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     struct mlxsw_sp_port *to,
			     enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}
void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
					      destination_port);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
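/* Example: with a hypothetical base MAC of 00:11:22:33:44:00, the port with
 * local_port == 5 is assigned 00:11:22:33:44:05. Only the last byte is
 * offset, so the scheme assumes fewer than 256 ports per base MAC.
 */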
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}
#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}

/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752

static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}
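/* Worked example (hypothetical values): with a PFC delay allowance of 32768
 * bits and a 96-byte cell, DIV_ROUND_UP(32768, BITS_PER_BYTE) == 4096 bytes,
 * i.e. 43 cells; for mtu == 1518 (16 cells) the reserved delay headroom is
 * then MLXSW_SP_CELL_FACTOR * 43 + 16 == 102 cells.
 */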
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
*dev
, int mtu
)
1208 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1209 bool pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
1212 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, pause_en
);
1215 err
= mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, mtu
);
1217 goto err_span_port_mtu_update
;
1218 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
1220 goto err_port_mtu_set
;
1225 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, dev
->mtu
);
1226 err_span_port_mtu_update
:
1227 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}
}
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}
/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->ref_count = 1;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan->ref_count++;
		return mlxsw_sp_port_vlan;
	}

	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (--mlxsw_sp_port_vlan->ref_count != 0)
		return;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
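/* Examples of the resulting names: a non-split port on module 2 is "p3"
 * (modules are 0-based, names are 1-based); a split port on the same module
 * using lane 2 of a 2-lane split is "p3s1", since the sub-port index is
 * lane / width.
 */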
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;

	to_dev = tcf_mirred_dev(a);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
					true);
}
static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	enum mlxsw_sp_span_type span_type;

	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
				 span_type, true);
}
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	__be16 protocol = f->common.protocol;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_one_action(f->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = f->cookie;

	tcf_exts_to_list(f->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 f->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *f,
					  bool ingress)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
						      ingress);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
	case TC_CLSFLOWER_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
		return 0;
	case TC_CLSFLOWER_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
					       void *type_data,
					       void *cb_priv, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
						   type_data))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
						      ingress);
	case TC_SETUP_CLSFLOWER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, true);
}

static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
						   cb_priv, false);
}
static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
					     void *type_data, void *cb_priv)
{
	struct mlxsw_sp_acl_block *acl_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return 0;
	case TC_SETUP_CLSFLOWER:
		if (mlxsw_sp_acl_block_disabled(acl_block))
			return -EOPNOTSUPP;

		return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int
mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb) {
		acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
		if (!acl_block)
			return -ENOMEM;
		block_cb = __tcf_block_cb_register(block,
						   mlxsw_sp_setup_tc_block_cb_flower,
						   mlxsw_sp, acl_block);
		if (IS_ERR(block_cb)) {
			err = PTR_ERR(block_cb);
			goto err_cb_register;
		}
	} else {
		acl_block = tcf_block_cb_priv(block_cb);
	}
	tcf_block_cb_incref(block_cb);
	err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
				      mlxsw_sp_port, ingress);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = acl_block;
	else
		mlxsw_sp_port->eg_acl_block = acl_block;

	return 0;

err_block_bind:
	if (!tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block_cb);
err_cb_register:
		mlxsw_sp_acl_block_destroy(acl_block);
	}
	return err;
}
static void
mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tcf_block *block, bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_block *acl_block;
	struct tcf_block_cb *block_cb;
	int err;

	block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
				       mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_acl_block = NULL;
	else
		mlxsw_sp_port->eg_acl_block = NULL;

	acl_block = tcf_block_cb_priv(block_cb);
	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
	if (!err && !tcf_block_cb_decref(block_cb)) {
		__tcf_block_cb_unregister(block_cb);
		mlxsw_sp_acl_block_destroy(acl_block);
	}
}
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct tc_block_offload *f)
{
	tc_setup_cb_t *cb;
	bool ingress;
	int err;

	if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
		ingress = true;
	} else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	switch (f->command) {
	case TC_BLOCK_BIND:
		err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
					    mlxsw_sp_port);
		if (err)
			return err;
		err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
							  f->block, ingress);
		if (err) {
			tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
			return err;
		}
		return 0;
	case TC_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
						      f->block, ingress);
		tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
		    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
		    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
	} else {
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
		mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
	}
	return 0;
}
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
}
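/* Example: if dev->features has NETIF_F_HW_TC set and wanted_features clears
 * it, then (wanted_features ^ dev->features) & NETIF_F_HW_TC is non-zero and
 * mlxsw_sp_feature_hw_tc() is invoked with enable == false; if the bit did
 * not change, the handler is skipped entirely.
 */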
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
	.ndo_set_features	= mlxsw_sp_set_features,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
2118 struct mlxsw_sp_port_hw_stats
{
2119 char str
[ETH_GSTRING_LEN
];
2120 u64 (*getter
)(const char *payload
);
2124 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats
[] = {
2126 .str
= "a_frames_transmitted_ok",
2127 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
2130 .str
= "a_frames_received_ok",
2131 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
2134 .str
= "a_frame_check_sequence_errors",
2135 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
2138 .str
= "a_alignment_errors",
2139 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
2142 .str
= "a_octets_transmitted_ok",
2143 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
2146 .str
= "a_octets_received_ok",
2147 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
2150 .str
= "a_multicast_frames_xmitted_ok",
2151 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
2154 .str
= "a_broadcast_frames_xmitted_ok",
2155 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
2158 .str
= "a_multicast_frames_received_ok",
2159 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
2162 .str
= "a_broadcast_frames_received_ok",
2163 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
2166 .str
= "a_in_range_length_errors",
2167 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
2170 .str
= "a_out_of_range_length_field",
2171 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
2174 .str
= "a_frame_too_long_errors",
2175 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
2178 .str
= "a_symbol_error_during_carrier",
2179 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
2182 .str
= "a_mac_control_frames_transmitted",
2183 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
2186 .str
= "a_mac_control_frames_received",
2187 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
2190 .str
= "a_unsupported_opcodes_received",
2191 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
2194 .str
= "a_pause_mac_ctrl_frames_received",
2195 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
2198 .str
= "a_pause_mac_ctrl_frames_xmitted",
2199 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
2203 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
2205 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats
[] = {
2207 .str
= "rx_octets_prio",
2208 .getter
= mlxsw_reg_ppcnt_rx_octets_get
,
2211 .str
= "rx_frames_prio",
2212 .getter
= mlxsw_reg_ppcnt_rx_frames_get
,
2215 .str
= "tx_octets_prio",
2216 .getter
= mlxsw_reg_ppcnt_tx_octets_get
,
2219 .str
= "tx_frames_prio",
2220 .getter
= mlxsw_reg_ppcnt_tx_frames_get
,
2223 .str
= "rx_pause_prio",
2224 .getter
= mlxsw_reg_ppcnt_rx_pause_get
,
2227 .str
= "rx_pause_duration_prio",
2228 .getter
= mlxsw_reg_ppcnt_rx_pause_duration_get
,
2231 .str
= "tx_pause_prio",
2232 .getter
= mlxsw_reg_ppcnt_tx_pause_get
,
2235 .str
= "tx_pause_duration_prio",
2236 .getter
= mlxsw_reg_ppcnt_tx_pause_duration_get
,
2240 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
2242 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats
[] = {
2244 .str
= "tc_transmit_queue_tc",
2245 .getter
= mlxsw_reg_ppcnt_tc_transmit_queue_get
,
2246 .cells_bytes
= true,
2249 .str
= "tc_no_buffer_discard_uc_tc",
2250 .getter
= mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get
,
2254 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2256 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2257 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
2258 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
2259 IEEE_8021QAZ_MAX_TCS)
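/* Editor's note on the count above: the ethtool stats block is laid out as
 * the IEEE 802.3 counters followed by one bank of per-priority and one bank
 * of per-TC counters for each of the eight 802.1Qaz traffic classes. With
 * the arrays as defined here that works out to 19 + (8 + 2) * 8 = 99 u64
 * values, which is also what mlxsw_sp_port_get_sset_count() reports below.
 */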
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}
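/* Editor's note: port identification is done through the MLCR register,
 * which latches the port LED beacon on or off, so the handler above only
 * needs the two steady states and returns 0 from ETHTOOL_ID_ACTIVE to tell
 * the ethtool core that the driver covers the whole duration itself. From
 * userspace this is the usual (port name swp1 is an assumption):
 *
 *	ethtool -p swp1 10	# light the LED of swp1 for ten seconds
 */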
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++) {
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
		if (!hw_stats[i].cells_bytes)
			continue;
		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
							    data[data_index + i]);
	}
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed		= SPEED_100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
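/* Editor's sketch of how the table above is consumed: PTYS protocol words
 * are bitmasks, so translating between them and ethtool link modes is a
 * linear scan over mlxsw_sp_port_link_mode[]. Illustrative only:
 *
 *	u32 ptys = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4;
 *	int i;
 *
 *	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++)
 *		if (ptys & mlxsw_sp_port_link_mode[i].mask)
 *			pr_info("mode bit %d, speed %d\n",
 *				mlxsw_sp_port_link_mode[i].mask_ethtool,
 *				mlxsw_sp_port_link_mode[i].speed);
 *
 * Several PTYS bits can map to one ethtool bit (e.g. the 56G R4 variants),
 * which is why the helpers below OR masks together rather than assume a
 * one-to-one mapping.
 */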
static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
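/* Editor's note: mlxsw_sp_to_ptys_upper_speed() is used when a port is first
 * brought up and only its lane width is known. Assuming the driver's
 * per-lane base speed of 25000 Mb/s (MLXSW_SP_PORT_BASE_SPEED), a 4-lane
 * port gets upper_speed = 100000 and therefore enables every table entry at
 * or below 100G, letting autonegotiation pick the operational speed.
 */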
static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}

static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		mlxsw_sp_to_ptys_advert_link(cmd) :
		mlxsw_sp_to_ptys_speed(cmd->base.speed);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port->link.autoneg = autoneg;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

static int mlxsw_sp_flash_device(struct net_device *dev,
				 struct ethtool_flash *flash)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	const struct firmware *firmware;
	int err;

	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
		return -EOPNOTSUPP;

	err = request_firmware_direct(&firmware, flash->data, &dev->dev);
	if (err)
		return err;
	err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
	release_firmware(firmware);

	return err;
}

#define MLXSW_SP_I2C_ADDR_LOW 0x50
#define MLXSW_SP_I2C_ADDR_HIGH 0x51
#define MLXSW_SP_EEPROM_PAGE_LENGTH 256
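/* Editor's sketch of the paged addressing used below (illustrative helper,
 * not part of the driver): SFF module EEPROMs expose two 256-byte pages at
 * I2C addresses 0x50 and 0x51, so a flat ethtool offset must be folded into
 * an (i2c_addr, in-page offset) pair, and a read is clamped so it never
 * crosses the page boundary:
 *
 *	static void eeprom_addr(u16 offset, u16 *i2c_addr, u16 *page_off)
 *	{
 *		*i2c_addr = offset < MLXSW_SP_EEPROM_PAGE_LENGTH ?
 *			    MLXSW_SP_I2C_ADDR_LOW : MLXSW_SP_I2C_ADDR_HIGH;
 *		*page_off = offset % MLXSW_SP_EEPROM_PAGE_LENGTH;
 *	}
 */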
static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 offset, u16 size, void *data,
					unsigned int *p_read_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
	char mcia_pl[MLXSW_REG_MCIA_LEN];
	u16 i2c_addr;
	int status;
	int err;

	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);

	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
		/* Cross pages read, read until offset 256 in low page */
		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;

	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
	}

	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
			    0, 0, offset, size, i2c_addr);

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
	if (err)
		return err;

	status = mlxsw_reg_mcia_status_get(mcia_pl);
	if (status)
		return -EIO;

	mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
	memcpy(data, eeprom_tmp, size);
	*p_read_size = size;

	return 0;
}

enum mlxsw_sp_eeprom_module_info_rev_id {
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC	= 0x00,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436		= 0x01,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636		= 0x03,
};

enum mlxsw_sp_eeprom_module_info_id {
	MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP		= 0x03,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP		= 0x0C,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS	= 0x0D,
	MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28		= 0x11,
};

enum mlxsw_sp_eeprom_module_info {
	MLXSW_SP_EEPROM_MODULE_INFO_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
	MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
};

static int mlxsw_sp_get_module_info(struct net_device *netdev,
				    struct ethtool_modinfo *modinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
	u8 module_rev_id, module_id;
	unsigned int read_size;
	int err;

	err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
					   MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
					   module_info, &read_size);
	if (err)
		return err;

	if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
		return -EIO;

	module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
	module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];

	switch (module_id) {
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
		modinfo->type		= ETH_MODULE_SFF_8436;
		modinfo->eeprom_len	= ETH_MODULE_SFF_8436_LEN;
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
		if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
		    module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
			modinfo->type		= ETH_MODULE_SFF_8636;
			modinfo->eeprom_len	= ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type		= ETH_MODULE_SFF_8436;
			modinfo->eeprom_len	= ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
		modinfo->type		= ETH_MODULE_SFF_8472;
		modinfo->eeprom_len	= ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee,
				      u8 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
	int offset = ee->offset;
	unsigned int read_size;
	int i = 0;
	int err;

	if (!ee->len)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (i < ee->len) {
		err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
						   ee->len - i, data + i,
						   &read_size);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
			return err;
		}

		i += read_size;
		offset += read_size;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
	.flash_device		= mlxsw_sp_flash_device,
	.get_module_info	= mlxsw_sp_get_module_info,
	.get_module_eeprom	= mlxsw_sp_get_module_eeprom,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
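/* Editor's note on the scheduling setup above: the QEEC hierarchy built here
 * is port -> group -> eight subgroups -> eight traffic classes, with TC i
 * linked to subgroup i. Everything starts without DWRR weights, with all max
 * shapers disabled and all switch priorities mapped to TC 0; the DCB and
 * qdisc code reshapes this default later. A minimal sketch of promoting
 * switch priority 1 to TC 1 with the same helper:
 *
 *	err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, 1, 1);
 */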
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = 1;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_get;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
						 GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		/* Mark as invalid */
		mlxsw_sp->port_to_module[i] = -1;

		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
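/* Editor's note: ports that can be split share a cluster of
 * MLXSW_SP_PORTS_PER_CLUSTER_MAX (4) consecutive local ports, and the helper
 * above rounds down to the first port of that cluster. For example, local
 * ports 5..8 form one cluster, so mlxsw_sp_cluster_base_port_get(7) == 5.
 */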
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		if (mlxsw_sp->port_to_module[local_port] < 0)
			continue;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
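/* Editor's note: the two callbacks above back the devlink port split API.
 * Assuming the switch sits at PCI address 0000:03:00.0 (hypothetical), a
 * 4-lane port with devlink index 1 is split and restored with:
 *
 *	devlink port split pci/0000:03:00.0/1 count 4
 *	devlink port unsplit pci/0000:03:00.0/1
 *
 * mlxsw_sp_port_unsplit() recomputes the original split factor from the
 * current lane width (width 1 implies a previous split by 4, otherwise 2).
 */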
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	skb->offload_mr_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
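/* Editor's note on the macros above: each expands to a struct mlxsw_listener
 * initializer. The _MARK variants set skb->offload_fwd_mark on delivery so
 * the bridge does not re-forward packets the ASIC already forwarded in
 * hardware; _NO_MARK is used for control packets (STP, LACP, ...) that are
 * only trapped, and _MR_MARK additionally sets offload_mr_fwd_mark for
 * multicast-routed packets. The trailing DISCARD appears to be the action
 * applied while the listener is not enabled.
 */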
3636 static const struct mlxsw_listener mlxsw_sp_listener
[] = {
3638 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func
, PUDE
),
3640 MLXSW_SP_RXL_NO_MARK(STP
, TRAP_TO_CPU
, STP
, true),
3641 MLXSW_SP_RXL_NO_MARK(LACP
, TRAP_TO_CPU
, LACP
, true),
3642 MLXSW_SP_RXL_NO_MARK(LLDP
, TRAP_TO_CPU
, LLDP
, true),
3643 MLXSW_SP_RXL_MARK(DHCP
, MIRROR_TO_CPU
, DHCP
, false),
3644 MLXSW_SP_RXL_MARK(IGMP_QUERY
, MIRROR_TO_CPU
, IGMP
, false),
3645 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT
, TRAP_TO_CPU
, IGMP
, false),
3646 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT
, TRAP_TO_CPU
, IGMP
, false),
3647 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE
, TRAP_TO_CPU
, IGMP
, false),
3648 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT
, TRAP_TO_CPU
, IGMP
, false),
3649 MLXSW_SP_RXL_MARK(ARPBC
, MIRROR_TO_CPU
, ARP
, false),
3650 MLXSW_SP_RXL_MARK(ARPUC
, MIRROR_TO_CPU
, ARP
, false),
3651 MLXSW_SP_RXL_NO_MARK(FID_MISS
, TRAP_TO_CPU
, IP2ME
, false),
3652 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY
, MIRROR_TO_CPU
, IPV6_MLD
,
3654 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT
, TRAP_TO_CPU
, IPV6_MLD
,
3656 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE
, TRAP_TO_CPU
, IPV6_MLD
,
3658 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT
, TRAP_TO_CPU
, IPV6_MLD
,
3661 MLXSW_SP_RXL_MARK(MTUERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3662 MLXSW_SP_RXL_MARK(TTLERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3663 MLXSW_SP_RXL_MARK(LBERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3664 MLXSW_SP_RXL_MARK(IP2ME
, TRAP_TO_CPU
, IP2ME
, false),
3665 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS
, TRAP_TO_CPU
, ROUTER_EXP
,
3667 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3668 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3669 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3670 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK
, TRAP_TO_CPU
, ROUTER_EXP
,
3672 MLXSW_SP_RXL_MARK(IPV4_OSPF
, TRAP_TO_CPU
, OSPF
, false),
3673 MLXSW_SP_RXL_MARK(IPV6_OSPF
, TRAP_TO_CPU
, OSPF
, false),
3674 MLXSW_SP_RXL_MARK(IPV6_DHCP
, TRAP_TO_CPU
, DHCP
, false),
3675 MLXSW_SP_RXL_MARK(RTR_INGRESS0
, TRAP_TO_CPU
, REMOTE_ROUTE
, false),
3676 MLXSW_SP_RXL_MARK(IPV4_BGP
, TRAP_TO_CPU
, BGP
, false),
3677 MLXSW_SP_RXL_MARK(IPV6_BGP
, TRAP_TO_CPU
, BGP
, false),
3678 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION
, TRAP_TO_CPU
, IPV6_ND
,
3680 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT
, TRAP_TO_CPU
, IPV6_ND
,
3682 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION
, TRAP_TO_CPU
, IPV6_ND
,
3684 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT
, TRAP_TO_CPU
, IPV6_ND
,
3686 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION
, TRAP_TO_CPU
, IPV6_ND
, false),
3687 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST
, TRAP_TO_CPU
, ROUTER_EXP
,
3689 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4
, TRAP_TO_CPU
, HOST_MISS
, false),
3690 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6
, TRAP_TO_CPU
, HOST_MISS
, false),
3691 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3692 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3693 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3694 /* PKT Sample trap */
3695 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func
, PKT_SAMPLE
, MIRROR_TO_CPU
,
3696 false, SP_IP2ME
, DISCARD
),
3698 MLXSW_SP_RXL_NO_MARK(ACL0
, TRAP_TO_CPU
, IP2ME
, false),
3699 /* Multicast Router Traps */
3700 MLXSW_SP_RXL_MARK(IPV4_PIM
, TRAP_TO_CPU
, PIM
, false),
3701 MLXSW_SP_RXL_MARK(RPF
, TRAP_TO_CPU
, RPF
, false),
3702 MLXSW_SP_RXL_MARK(ACL1
, TRAP_TO_CPU
, MULTICAST
, false),
3703 MLXSW_SP_RXL_MR_MARK(ACL2
, TRAP_TO_CPU
, MULTICAST
, false),
3706 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core
*mlxsw_core
)
3708 char qpcr_pl
[MLXSW_REG_QPCR_LEN
];
3709 enum mlxsw_reg_qpcr_ir_units ir_units
;
3710 int max_cpu_policers
;
3716 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_CPU_POLICERS
))
3719 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
3721 ir_units
= MLXSW_REG_QPCR_IR_UNITS_M
;
3722 for (i
= 0; i
< max_cpu_policers
; i
++) {
3725 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
3726 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
3727 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
3728 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
3729 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM
:
3730 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF
:
3734 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
3735 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD
:
3739 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP
:
3740 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
3741 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
3742 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS
:
3743 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
3744 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
3745 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND
:
3746 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
3750 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
3759 mlxsw_reg_qpcr_pack(qpcr_pl
, i
, ir_units
, is_bytes
, rate
,
3761 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(qpcr
), qpcr_pl
);
3769 static int mlxsw_sp_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
3771 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
3772 enum mlxsw_reg_htgt_trap_group i
;
3773 int max_cpu_policers
;
3774 int max_trap_groups
;
3779 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_TRAP_GROUPS
))
3782 max_trap_groups
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_TRAP_GROUPS
);
3783 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
3785 for (i
= 0; i
< max_trap_groups
; i
++) {
3788 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
3789 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
3790 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
3791 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
3792 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM
:
3796 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP
:
3797 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
3801 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
3802 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
3803 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD
:
3807 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
3808 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND
:
3809 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF
:
3813 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS
:
3814 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
3815 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
3816 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
3820 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT
:
3821 priority
= MLXSW_REG_HTGT_DEFAULT_PRIORITY
;
3822 tc
= MLXSW_REG_HTGT_DEFAULT_TC
;
3823 policer_id
= MLXSW_REG_HTGT_INVALID_POLICER
;
3829 if (max_cpu_policers
<= policer_id
&&
3830 policer_id
!= MLXSW_REG_HTGT_INVALID_POLICER
)
3833 mlxsw_reg_htgt_pack(htgt_pl
, i
, policer_id
, priority
, tc
);
3834 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
3842 static int mlxsw_sp_traps_init(struct mlxsw_sp
*mlxsw_sp
)
3847 err
= mlxsw_sp_cpu_policers_set(mlxsw_sp
->core
);
3851 err
= mlxsw_sp_trap_groups_set(mlxsw_sp
->core
);
3855 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_listener
); i
++) {
3856 err
= mlxsw_core_trap_register(mlxsw_sp
->core
,
3857 &mlxsw_sp_listener
[i
],
3860 goto err_listener_register
;
3865 err_listener_register
:
3866 for (i
--; i
>= 0; i
--) {
3867 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
3868 &mlxsw_sp_listener
[i
],
3874 static void mlxsw_sp_traps_fini(struct mlxsw_sp
*mlxsw_sp
)
3878 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_listener
); i
++) {
3879 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
3880 &mlxsw_sp_listener
[i
],
3885 static int mlxsw_sp_lag_init(struct mlxsw_sp
*mlxsw_sp
)
3887 char slcr_pl
[MLXSW_REG_SLCR_LEN
];
3890 mlxsw_reg_slcr_pack(slcr_pl
, MLXSW_REG_SLCR_LAG_HASH_SMAC
|
3891 MLXSW_REG_SLCR_LAG_HASH_DMAC
|
3892 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE
|
3893 MLXSW_REG_SLCR_LAG_HASH_VLANID
|
3894 MLXSW_REG_SLCR_LAG_HASH_SIP
|
3895 MLXSW_REG_SLCR_LAG_HASH_DIP
|
3896 MLXSW_REG_SLCR_LAG_HASH_SPORT
|
3897 MLXSW_REG_SLCR_LAG_HASH_DPORT
|
3898 MLXSW_REG_SLCR_LAG_HASH_IPPROTO
);
3899 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcr
), slcr_pl
);
3903 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG
) ||
3904 !MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG_MEMBERS
))
3907 mlxsw_sp
->lags
= kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
),
3908 sizeof(struct mlxsw_sp_upper
),
3910 if (!mlxsw_sp
->lags
)
3916 static void mlxsw_sp_lag_fini(struct mlxsw_sp
*mlxsw_sp
)
3918 kfree(mlxsw_sp
->lags
);
3921 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
3923 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
3925 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_EMAD
,
3926 MLXSW_REG_HTGT_INVALID_POLICER
,
3927 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
3928 MLXSW_REG_HTGT_DEFAULT_TC
);
3929 return mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
3932 static int mlxsw_sp_netdevice_event(struct notifier_block
*unused
,
3933 unsigned long event
, void *ptr
);
3935 static int mlxsw_sp_init(struct mlxsw_core
*mlxsw_core
,
3936 const struct mlxsw_bus_info
*mlxsw_bus_info
)
3938 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3941 mlxsw_sp
->core
= mlxsw_core
;
3942 mlxsw_sp
->bus_info
= mlxsw_bus_info
;
3944 err
= mlxsw_sp_fw_rev_validate(mlxsw_sp
);
3946 dev_err(mlxsw_sp
->bus_info
->dev
, "Could not upgrade firmware\n");
3950 err
= mlxsw_sp_base_mac_get(mlxsw_sp
);
3952 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to get base mac\n");
3956 err
= mlxsw_sp_kvdl_init(mlxsw_sp
);
3958 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize KVDL\n");
3962 err
= mlxsw_sp_fids_init(mlxsw_sp
);
3964 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize FIDs\n");
3968 err
= mlxsw_sp_traps_init(mlxsw_sp
);
3970 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set traps\n");
3971 goto err_traps_init
;
3974 err
= mlxsw_sp_buffers_init(mlxsw_sp
);
3976 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize buffers\n");
3977 goto err_buffers_init
;
3980 err
= mlxsw_sp_lag_init(mlxsw_sp
);
3982 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize LAG\n");
3986 err
= mlxsw_sp_switchdev_init(mlxsw_sp
);
3988 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize switchdev\n");
3989 goto err_switchdev_init
;
3992 err
= mlxsw_sp_counter_pool_init(mlxsw_sp
);
3994 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init counter pool\n");
3995 goto err_counter_pool_init
;
3998 err
= mlxsw_sp_afa_init(mlxsw_sp
);
4000 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL actions\n");
4004 err
= mlxsw_sp_router_init(mlxsw_sp
);
4006 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize router\n");
4007 goto err_router_init
;
4010 /* Initialize netdevice notifier after router is initialized, so that
4011 * the event handler can use router structures.
4013 mlxsw_sp
->netdevice_nb
.notifier_call
= mlxsw_sp_netdevice_event
;
4014 err
= register_netdevice_notifier(&mlxsw_sp
->netdevice_nb
);
4016 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to register netdev notifier\n");
4017 goto err_netdev_notifier
;
4020 err
= mlxsw_sp_span_init(mlxsw_sp
);
4022 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init span system\n");
4026 err
= mlxsw_sp_acl_init(mlxsw_sp
);
4028 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL\n");
4032 err
= mlxsw_sp_dpipe_init(mlxsw_sp
);
4034 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init pipeline debug\n");
4035 goto err_dpipe_init
;
4038 err
= mlxsw_sp_ports_create(mlxsw_sp
);
4040 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create ports\n");
4041 goto err_ports_create
;
4047 mlxsw_sp_dpipe_fini(mlxsw_sp
);
4049 mlxsw_sp_acl_fini(mlxsw_sp
);
4051 mlxsw_sp_span_fini(mlxsw_sp
);
4053 unregister_netdevice_notifier(&mlxsw_sp
->netdevice_nb
);
4054 err_netdev_notifier
:
4055 mlxsw_sp_router_fini(mlxsw_sp
);
4057 mlxsw_sp_afa_fini(mlxsw_sp
);
4059 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
4060 err_counter_pool_init
:
4061 mlxsw_sp_switchdev_fini(mlxsw_sp
);
4063 mlxsw_sp_lag_fini(mlxsw_sp
);
4065 mlxsw_sp_buffers_fini(mlxsw_sp
);
4067 mlxsw_sp_traps_fini(mlxsw_sp
);
4069 mlxsw_sp_fids_fini(mlxsw_sp
);
4071 mlxsw_sp_kvdl_fini(mlxsw_sp
);
4075 static void mlxsw_sp_fini(struct mlxsw_core
*mlxsw_core
)
4077 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
4079 mlxsw_sp_ports_remove(mlxsw_sp
);
4080 mlxsw_sp_dpipe_fini(mlxsw_sp
);
4081 mlxsw_sp_acl_fini(mlxsw_sp
);
4082 mlxsw_sp_span_fini(mlxsw_sp
);
4083 unregister_netdevice_notifier(&mlxsw_sp
->netdevice_nb
);
4084 mlxsw_sp_router_fini(mlxsw_sp
);
4085 mlxsw_sp_afa_fini(mlxsw_sp
);
4086 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
4087 mlxsw_sp_switchdev_fini(mlxsw_sp
);
4088 mlxsw_sp_lag_fini(mlxsw_sp
);
4089 mlxsw_sp_buffers_fini(mlxsw_sp
);
4090 mlxsw_sp_traps_fini(mlxsw_sp
);
4091 mlxsw_sp_fids_fini(mlxsw_sp
);
4092 mlxsw_sp_kvdl_fini(mlxsw_sp
);
4095 static const struct mlxsw_config_profile mlxsw_sp_config_profile
= {
4096 .used_max_vepa_channels
= 1,
4097 .max_vepa_channels
= 0,
4099 .max_mid
= MLXSW_SP_MID_MAX
,
4102 .used_flood_tables
= 1,
4103 .used_flood_mode
= 1,
4105 .max_fid_offset_flood_tables
= 3,
4106 .fid_offset_flood_table_size
= VLAN_N_VID
- 1,
4107 .max_fid_flood_tables
= 3,
4108 .fid_flood_table_size
= MLXSW_SP_FID_8021D_MAX
,
4109 .used_max_ib_mc
= 1,
4113 .used_kvd_split_data
= 1,
4114 .kvd_hash_granularity
= MLXSW_SP_KVD_GRANULARITY
,
4115 .kvd_hash_single_parts
= 59,
4116 .kvd_hash_double_parts
= 41,
4117 .kvd_linear_size
= MLXSW_SP_KVD_LINEAR_SIZE
,
4121 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
4124 .resource_query_enable
= 1,
4128 mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack
*extack
,
4131 const struct mlxsw_config_profile
*profile
;
4133 profile
= &mlxsw_sp_config_profile
;
4134 if (size
% profile
->kvd_hash_granularity
) {
4135 NL_SET_ERR_MSG_MOD(extack
, "resource set with wrong granularity");
4142 mlxsw_sp_resource_kvd_size_validate(struct devlink
*devlink
, u64 size
,
4143 struct netlink_ext_ack
*extack
)
4145 NL_SET_ERR_MSG_MOD(extack
, "kvd size cannot be changed");
4150 mlxsw_sp_resource_kvd_linear_size_validate(struct devlink
*devlink
, u64 size
,
4151 struct netlink_ext_ack
*extack
)
4153 if (!mlxsw_sp_resource_kvd_granularity_validate(extack
, size
))
4160 mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink
*devlink
, u64 size
,
4161 struct netlink_ext_ack
*extack
)
4163 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
4165 if (!mlxsw_sp_resource_kvd_granularity_validate(extack
, size
))
4168 if (size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SINGLE_MIN_SIZE
)) {
4169 NL_SET_ERR_MSG_MOD(extack
, "hash single size is smaller than minimum");
4176 mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink
*devlink
, u64 size
,
4177 struct netlink_ext_ack
*extack
)
4179 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
4181 if (!mlxsw_sp_resource_kvd_granularity_validate(extack
, size
))
4184 if (size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_DOUBLE_MIN_SIZE
)) {
4185 NL_SET_ERR_MSG_MOD(extack
, "hash double size is smaller than minimum");
static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
}
static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
	.size_validate = mlxsw_sp_resource_kvd_size_validate,
};

static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
	.size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
	.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
};

static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
	.size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
};

static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
};
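/* Each partition's devlink size limits: a partition may grow until it would
 * squeeze its sibling partitions below their device-reported minima, and all
 * sizes are expressed in KVD entries at MLXSW_SP_KVD_GRANULARITY granularity.
 */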
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
					kvd_size,
					MLXSW_SP_RESOURCE_KVD,
					DEVLINK_RESOURCE_ID_PARENT_TOP,
					&kvd_size_params,
					&mlxsw_sp_resource_kvd_ops);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
					linear_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					MLXSW_SP_RESOURCE_KVD,
					&linear_size_params,
					&mlxsw_sp_resource_kvd_linear_ops);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, profile->kvd_hash_granularity);
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
					double_size,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_double_size_params,
					&mlxsw_sp_resource_kvd_hash_double_ops);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
					single_size,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					MLXSW_SP_RESOURCE_KVD,
					&hash_single_size_params,
					&mlxsw_sp_resource_kvd_hash_single_ops);
	if (err)
		return err;

	return 0;
}
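/* Worked example of the hash split arithmetic used above and in
 * mlxsw_sp_kvd_sizes_get() below (illustrative numbers only): with a
 * hypothetical kvd_size of 204800 entries and a linear part of 98304,
 * the hash part is 106496 entries; double gets 106496 * 41 / (41 + 59)
 * = 43663, rounded down to the granularity (e.g. 128) -> 43648, and
 * single takes the remainder, 204800 - 43648 - 98304 = 62848.
 */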
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    !profile->used_kvd_split_data)
		return -EIO;

	/* The hash part is what is left of the KVD after the linear
	 * part is taken out. It is split into single and double sizes
	 * according to the parts ratio from the profile, and both sizes
	 * must be multiples of the granularity from the profile. If the
	 * user provided the sizes, they are obtained via devlink instead.
	 */
	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
					p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   profile->kvd_hash_granularity);
	}

	err = devlink_resource_size_get(devlink,
					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
					p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}
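/* Take a reference on the port's netdevice so the caller can keep using it
 * after the RCU read-side critical section ends; must be balanced with
 * mlxsw_sp_port_dev_put().
 */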
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
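/* LAG offload is driven through two registers (as used below): SLDR, which
 * maintains the LAG descriptor and its port distribution list, and SLCOR,
 * which controls collector port membership for received traffic.
 */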
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
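/* A LAG ID is allocated per upper device: reuse the entry already bound to
 * this netdev if one exists, otherwise take the first free slot, up to the
 * MAX_LAG limit reported by the device.
 */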
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG(extack,
			       "spectrum: LAG device using unsupported Tx type");
		return false;
	}
	return true;
}
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
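/* Joining a LAG: allocate (or reuse) a LAG ID for the upper device, create
 * the LAG in hardware on first use, then add and enable the port as a
 * collector before recording the LAG-to-local-port mapping in the core.
 * The error path unwinds these steps in reverse order.
 */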
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
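/* When a port is enslaved to an OVS master it is moved to virtual port
 * mode, put into forwarding state, opened up for all VLANs and has
 * learning disabled, since the OVS datapath takes over forwarding
 * decisions; the error path below unwinds these steps in reverse order.
 */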
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
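/* The netdev notifier handlers below veto unsupported topologies at
 * NETDEV_PRECHANGEUPPER time, before the change is committed, and program
 * the hardware at NETDEV_CHANGEUPPER time.
 */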
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG(extack,
				       "spectrum: Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}
static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev)) {
			NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);

	return 0;
}
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}
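/* Single entry point for all netdev events. Dispatch order matters: IP-in-IP
 * overlay/underlay devices and router port events are checked first, then
 * VRF enslavements, and only then physical ports, LAG masters and VLAN
 * uppers; this is why the VLAN handler above never sees VRF uppers.
 */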
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_event,
};
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};
static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);