/*
 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "ib.h"
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";
struct mlxsw_sx {
	struct mlxsw_sx_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];
};

struct mlxsw_sx_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};

struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;
	u8 local_port;
	struct {
		u8 module;
	} mapping;
};
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* Packet protocol type. Must be set to 1 (Ethernet). */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* Switch partition ID. */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* See field 'etclass'. */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* Signature control for packets going to CPU. Must be set to 0. */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* Stacking protocol signature. Must be set to 0xE0E0. */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* EMAD bit. Must be set for EMADs. */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* Packet type:
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	bool is_emad = tx_info->is_emad;

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	/* We currently set default values for the egress tclass (QoS). */
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
						  MLXSW_TXHDR_ETCLASS_5);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
					      MLXSW_TXHDR_RDQ_OTHER);
	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
					       MLXSW_TXHDR_NOT_EMAD);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
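/* Note: besides the direct call in mlxsw_sx_port_xmit() below, this helper is
 * also exposed to the mlxsw core through the .txhdr_construct hook registered
 * in mlxsw_sx_driver at the bottom of this file, so internally generated
 * traffic (is_emad selects the EMAD-specific etclass/RDQ/EMAD-bit values) gets
 * the same header prepended.
 */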
static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
					  bool is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
				    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
					 bool *p_is_up)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
	return 0;
}

static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
				   u16 mtu)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u16 mtu)
{
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    u16 mtu)
{
	return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
}

static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
				     u8 ib_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
	int err;

	mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
	return err;
}
static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
}

static int
mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	return 0;
}
static int mlxsw_sx_port_open(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sx_port_stop(struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
}

static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}
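/* The loop above is the usual u64_stats_sync reader pattern: each per-CPU
 * snapshot is re-read until u64_stats_fetch_retry_irq() reports that no
 * writer (the xmit/rx paths doing u64_stats_update_begin/end) raced with the
 * read, which keeps the 64-bit counters consistent even on 32-bit hosts.
 * tx_dropped deliberately stays outside that protection since it is a u32.
 */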
static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	int err;

	err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
	if (err >= len)
		return -EINVAL;

	return 0;
}

static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open		= mlxsw_sx_port_open,
	.ndo_stop		= mlxsw_sx_port_stop,
	.ndo_start_xmit		= mlxsw_sx_port_xmit,
	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
	.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
};

static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
static void mlxsw_sx_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SX_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
struct mlxsw_sx_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
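/* Example: mlxsw_sx_port_speed_by_width_set() below multiplies this base
 * speed by the port width reported in PMLP, so a 4-lane port gets an upper
 * speed of 40000 Mb/s and every link mode in the table whose .speed does not
 * exceed that value is enabled in the PTYS admin mask.
 */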
static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
			modes |= mlxsw_sx_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
			speed = mlxsw_sx_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
static int mlxsw_sx_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
				  &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sx_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sx_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
	}
	return ptys_proto;
}
static int mlxsw_sx_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sx_port_get_strings,
	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
	.get_sset_count		= mlxsw_sx_port_get_sset_count,
	.get_settings		= mlxsw_sx_port_get_settings,
	.set_settings		= mlxsw_sx_port_set_settings,
};

static int mlxsw_sx_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
	.switchdev_port_attr_get = mlxsw_sx_port_attr_get,
};

static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
	return 0;
}
static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in base mac address is guaranteed
	 * to be such it does not overflow when adding local_port
	 * value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}
static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
				      u16 speed, u16 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];

	mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
			       width);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}

static int
mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
				    enum mlxsw_reg_spmlr_learn_mode mode)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char spmlr_pl[MLXSW_REG_SPMLR_LEN];

	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
}
static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				      u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
				mlxsw_sx_port, dev, false, 0);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				    u8 module, u8 width)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
	if (err)
		goto err_port_create;

	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
	return err;
}

static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	free_netdev(mlxsw_sx_port->dev);
}

static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	return mlxsw_sx->ports[local_port] != NULL;
}
static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				     u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	int err;

	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
	if (!mlxsw_sx_port)
		return -ENOMEM;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* Adding port to InfiniBand swid (1) */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	/* Expose the IB port number as its front panel name */
	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
			mlxsw_sx_port->local_port);
		goto err_port_ib_set;
	}

	/* Supports all speeds from SDR to FDR (bitmask) and supports bus
	 * widths of 1x, 2x and 4x (3-bit bitmask).
	 */
	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
					 BIT(3) - 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	/* Change to the maximum MTU the device supports, the SMA will take
	 * care of the active MTU
	 */
	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
			mlxsw_sx_port->local_port);
		goto err_port_admin_set;
	}

	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
			       mlxsw_sx_port);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_port_admin_set:
err_port_mtu_set:
err_port_speed_set:
err_port_ib_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
	kfree(mlxsw_sx_port);
	return err;
}
static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	kfree(mlxsw_sx_port);
}

static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	enum devlink_port_type port_type =
		mlxsw_core_port_type_get(mlxsw_sx->core, local_port);

	if (port_type == DEVLINK_PORT_TYPE_ETH)
		__mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		__mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
}

static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
}

static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
}
static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
{
	size_t alloc_size;
	u8 module, width;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sx->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
						    &width);
		if (err)
			goto err_port_module_info_get;
		err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sx_port_created(mlxsw_sx, i))
			mlxsw_sx_port_remove(mlxsw_sx, i);
	kfree(mlxsw_sx->ports);
	return err;
}
static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					 enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sx_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sx_port->dev);
	} else {
		netdev_info(mlxsw_sx_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sx_port->dev);
	}
}

static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
					enum mlxsw_reg_pude_oper_status status)
{
	if (status == MLXSW_PORT_OPER_STATUS_UP)
		pr_info("ib link for port %d - up\n",
			mlxsw_sx_port->mapping.module + 1);
	else
		pr_info("ib link for port %d - down\n",
			mlxsw_sx_port->mapping.module + 1);
}

static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port;
	enum mlxsw_reg_pude_oper_status status;
	enum devlink_port_type port_type;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sx_port = mlxsw_sx->ports[local_port];
	if (!mlxsw_sx_port) {
		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
	if (port_type == DEVLINK_PORT_TYPE_ETH)
		mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
	else if (port_type == DEVLINK_PORT_TYPE_IB)
		mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
}
static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
				  enum devlink_port_type new_type)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	u8 module, width;
	int err;

	if (new_type == DEVLINK_PORT_TYPE_AUTO)
		return -EOPNOTSUPP;

	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
					    &width);
	if (err)
		goto err_port_module_info_get;

	if (new_type == DEVLINK_PORT_TYPE_ETH)
		err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
						 width);
	else if (new_type == DEVLINK_PORT_TYPE_IB)
		err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
						width);

err_port_module_info_get:
	return err;
}
#define MLXSW_SX_RXL(_trap_id) \
	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU,	\
		  false, SX2_RX, FORWARD)

static const struct mlxsw_listener mlxsw_sx_listener[] = {
	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
	MLXSW_SX_RXL(FDB_MC),
	MLXSW_SX_RXL(EAPOL),
	MLXSW_SX_RXL(RPVST),
	MLXSW_SX_RXL(IGMP_QUERY),
	MLXSW_SX_RXL(IGMP_V1_REPORT),
	MLXSW_SX_RXL(IGMP_V2_REPORT),
	MLXSW_SX_RXL(IGMP_V2_LEAVE),
	MLXSW_SX_RXL(IGMP_V3_REPORT),
};
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sx->core,
					       &mlxsw_sx_listener[i],
					       mlxsw_sx);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sx->core,
					   &mlxsw_sx_listener[i],
					   mlxsw_sx);
	}
	return err;
}

static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sx->core,
					   &mlxsw_sx_listener[i],
					   mlxsw_sx);
	}
}
*mlxsw_sx
)
1534 char sfgc_pl
[MLXSW_REG_SFGC_LEN
];
1535 char sgcr_pl
[MLXSW_REG_SGCR_LEN
];
1539 /* Configure a flooding table, which includes only CPU port. */
1540 sftr_pl
= kmalloc(MLXSW_REG_SFTR_LEN
, GFP_KERNEL
);
1543 mlxsw_reg_sftr_pack(sftr_pl
, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE
, 0,
1544 MLXSW_PORT_CPU_PORT
, true);
1545 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sftr
), sftr_pl
);
1550 /* Flood different packet types using the flooding table. */
1551 mlxsw_reg_sfgc_pack(sfgc_pl
,
1552 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST
,
1553 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID
,
1554 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE
,
1556 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sfgc
), sfgc_pl
);
1560 mlxsw_reg_sfgc_pack(sfgc_pl
,
1561 MLXSW_REG_SFGC_TYPE_BROADCAST
,
1562 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID
,
1563 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE
,
1565 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sfgc
), sfgc_pl
);
1569 mlxsw_reg_sfgc_pack(sfgc_pl
,
1570 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP
,
1571 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID
,
1572 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE
,
1574 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sfgc
), sfgc_pl
);
1578 mlxsw_reg_sfgc_pack(sfgc_pl
,
1579 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6
,
1580 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID
,
1581 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE
,
1583 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sfgc
), sfgc_pl
);
1587 mlxsw_reg_sfgc_pack(sfgc_pl
,
1588 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4
,
1589 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID
,
1590 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE
,
1592 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sfgc
), sfgc_pl
);
1596 mlxsw_reg_sgcr_pack(sgcr_pl
, true);
1597 return mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sgcr
), sgcr_pl
);
static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
		goto err_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_listener_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}

static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 48000,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.used_flood_tables		= 1,
	.max_flood_tables		= 2,
	.max_vid_flood_tables		= 1,
	.used_flood_mode		= 1,
	.used_max_ib_mc			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		},
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_IB,
		}
	},
	.resource_query_enable		= 0,
};

static struct mlxsw_driver mlxsw_sx_driver = {
	.kind			= mlxsw_sx_driver_name,
	.priv_size		= sizeof(struct mlxsw_sx),
	.init			= mlxsw_sx_init,
	.fini			= mlxsw_sx_fini,
	.basic_trap_groups_set	= mlxsw_sx_basic_trap_groups_set,
	.txhdr_construct	= mlxsw_sx_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sx_config_profile,
	.port_type_set		= mlxsw_sx_port_type_set,
};

static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, },
};
static struct pci_driver mlxsw_sx_pci_driver = {
	.name = mlxsw_sx_driver_name,
	.id_table = mlxsw_sx_pci_id_table,
};

static int __init mlxsw_sx_module_init(void)
{
	int err;

	err = mlxsw_core_driver_register(&mlxsw_sx_driver);
	if (err)
		return err;

	err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
	return err;
}

static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}

module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);