2 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/slab.h>
43 #include <linux/device.h>
44 #include <linux/skbuff.h>
45 #include <linux/if_vlan.h>
46 #include <net/switchdev.h>
47 #include <generated/utsrelease.h>
/* Identification strings reported through ethtool and module metadata. */
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";
61 struct mlxsw_sx_port
**ports
;
62 struct mlxsw_core
*core
;
63 const struct mlxsw_bus_info
*bus_info
;
67 struct mlxsw_sx_port_pcpu_stats
{
72 struct u64_stats_sync syncp
;
76 struct mlxsw_sx_port
{
77 struct net_device
*dev
;
78 struct mlxsw_sx_port_pcpu_stats __percpu
*pcpu_stats
;
79 struct mlxsw_sx
*mlxsw_sx
;
87 MLXSW_ITEM32(tx
, hdr
, version
, 0x00, 28, 4);
90 * Packet control type.
91 * 0 - Ethernet control (e.g. EMADs, LACP)
94 MLXSW_ITEM32(tx
, hdr
, ctl
, 0x00, 26, 2);
97 * Packet protocol type. Must be set to 1 (Ethernet).
99 MLXSW_ITEM32(tx
, hdr
, proto
, 0x00, 21, 3);
102 * Egress TClass to be used on the egress device on the egress port.
103 * The MSB is specified in the 'ctclass3' field.
104 * Range is 0-15, where 15 is the highest priority.
106 MLXSW_ITEM32(tx
, hdr
, etclass
, 0x00, 18, 3);
109 * Switch partition ID.
111 MLXSW_ITEM32(tx
, hdr
, swid
, 0x00, 12, 3);
114 * Destination local port for unicast packets.
115 * Destination multicast ID for multicast packets.
117 * Control packets are directed to a specific egress port, while data
118 * packets are transmitted through the CPU port (0) into the switch partition,
119 * where forwarding rules are applied.
121 MLXSW_ITEM32(tx
, hdr
, port_mid
, 0x04, 16, 16);
124 * See field 'etclass'.
126 MLXSW_ITEM32(tx
, hdr
, ctclass3
, 0x04, 14, 1);
129 * RDQ for control packets sent to remote CPU.
130 * Must be set to 0x1F for EMADs, otherwise 0.
132 MLXSW_ITEM32(tx
, hdr
, rdq
, 0x04, 9, 5);
135 * Signature control for packets going to CPU. Must be set to 0.
137 MLXSW_ITEM32(tx
, hdr
, cpu_sig
, 0x04, 0, 9);
140 * Stacking protocl signature. Must be set to 0xE0E0.
142 MLXSW_ITEM32(tx
, hdr
, sig
, 0x0C, 16, 16);
147 MLXSW_ITEM32(tx
, hdr
, stclass
, 0x0C, 13, 3);
150 * EMAD bit. Must be set for EMADs.
152 MLXSW_ITEM32(tx
, hdr
, emad
, 0x0C, 5, 1);
156 * 6 - Control packets
158 MLXSW_ITEM32(tx
, hdr
, type
, 0x0C, 0, 4);
160 static void mlxsw_sx_txhdr_construct(struct sk_buff
*skb
,
161 const struct mlxsw_tx_info
*tx_info
)
163 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
164 bool is_emad
= tx_info
->is_emad
;
166 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
168 /* We currently set default values for the egress tclass (QoS). */
169 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_0
);
170 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
171 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
172 mlxsw_tx_hdr_etclass_set(txhdr
, is_emad
? MLXSW_TXHDR_ETCLASS_6
:
173 MLXSW_TXHDR_ETCLASS_5
);
174 mlxsw_tx_hdr_swid_set(txhdr
, 0);
175 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
176 mlxsw_tx_hdr_ctclass3_set(txhdr
, MLXSW_TXHDR_CTCLASS3
);
177 mlxsw_tx_hdr_rdq_set(txhdr
, is_emad
? MLXSW_TXHDR_RDQ_EMAD
:
178 MLXSW_TXHDR_RDQ_OTHER
);
179 mlxsw_tx_hdr_cpu_sig_set(txhdr
, MLXSW_TXHDR_CPU_SIG
);
180 mlxsw_tx_hdr_sig_set(txhdr
, MLXSW_TXHDR_SIG
);
181 mlxsw_tx_hdr_stclass_set(txhdr
, MLXSW_TXHDR_STCLASS_NONE
);
182 mlxsw_tx_hdr_emad_set(txhdr
, is_emad
? MLXSW_TXHDR_EMAD
:
183 MLXSW_TXHDR_NOT_EMAD
);
184 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
187 static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port
*mlxsw_sx_port
,
190 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
191 char paos_pl
[MLXSW_REG_PAOS_LEN
];
193 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sx_port
->local_port
,
194 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
195 MLXSW_PORT_ADMIN_STATUS_DOWN
);
196 return mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(paos
), paos_pl
);
199 static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port
*mlxsw_sx_port
,
202 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
203 char paos_pl
[MLXSW_REG_PAOS_LEN
];
207 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sx_port
->local_port
, 0);
208 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(paos
), paos_pl
);
211 oper_status
= mlxsw_reg_paos_oper_status_get(paos_pl
);
212 *p_is_up
= oper_status
== MLXSW_PORT_ADMIN_STATUS_UP
? true : false;
216 static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port
*mlxsw_sx_port
, u16 mtu
)
218 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
219 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
223 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
224 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sx_port
->local_port
, 0);
225 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
228 max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
233 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sx_port
->local_port
, mtu
);
234 return mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
237 static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port
*mlxsw_sx_port
, u8 swid
)
239 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
240 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
242 mlxsw_reg_pspa_pack(pspa_pl
, swid
, mlxsw_sx_port
->local_port
);
243 return mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(pspa
), pspa_pl
);
247 mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port
*mlxsw_sx_port
)
249 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
250 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
252 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sx_port
->local_port
);
253 return mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(sspr
), sspr_pl
);
256 static int mlxsw_sx_port_module_check(struct mlxsw_sx_port
*mlxsw_sx_port
,
259 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
260 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
263 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sx_port
->local_port
);
264 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
267 *p_usable
= mlxsw_reg_pmlp_width_get(pmlp_pl
) ? true : false;
271 static int mlxsw_sx_port_open(struct net_device
*dev
)
273 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
276 err
= mlxsw_sx_port_admin_status_set(mlxsw_sx_port
, true);
279 netif_start_queue(dev
);
283 static int mlxsw_sx_port_stop(struct net_device
*dev
)
285 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
287 netif_stop_queue(dev
);
288 return mlxsw_sx_port_admin_status_set(mlxsw_sx_port
, false);
291 static netdev_tx_t
mlxsw_sx_port_xmit(struct sk_buff
*skb
,
292 struct net_device
*dev
)
294 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
295 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
296 struct mlxsw_sx_port_pcpu_stats
*pcpu_stats
;
297 const struct mlxsw_tx_info tx_info
= {
298 .local_port
= mlxsw_sx_port
->local_port
,
304 if (mlxsw_core_skb_transmit_busy(mlxsw_sx
, &tx_info
))
305 return NETDEV_TX_BUSY
;
307 if (unlikely(skb_headroom(skb
) < MLXSW_TXHDR_LEN
)) {
308 struct sk_buff
*skb_orig
= skb
;
310 skb
= skb_realloc_headroom(skb
, MLXSW_TXHDR_LEN
);
312 this_cpu_inc(mlxsw_sx_port
->pcpu_stats
->tx_dropped
);
313 dev_kfree_skb_any(skb_orig
);
317 mlxsw_sx_txhdr_construct(skb
, &tx_info
);
319 /* Due to a race we might fail here because of a full queue. In that
320 * unlikely case we simply drop the packet.
322 err
= mlxsw_core_skb_transmit(mlxsw_sx
, skb
, &tx_info
);
325 pcpu_stats
= this_cpu_ptr(mlxsw_sx_port
->pcpu_stats
);
326 u64_stats_update_begin(&pcpu_stats
->syncp
);
327 pcpu_stats
->tx_packets
++;
328 pcpu_stats
->tx_bytes
+= len
;
329 u64_stats_update_end(&pcpu_stats
->syncp
);
331 this_cpu_inc(mlxsw_sx_port
->pcpu_stats
->tx_dropped
);
332 dev_kfree_skb_any(skb
);
337 static int mlxsw_sx_port_change_mtu(struct net_device
*dev
, int mtu
)
339 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
342 err
= mlxsw_sx_port_mtu_set(mlxsw_sx_port
, mtu
);
349 static struct rtnl_link_stats64
*
350 mlxsw_sx_port_get_stats64(struct net_device
*dev
,
351 struct rtnl_link_stats64
*stats
)
353 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
354 struct mlxsw_sx_port_pcpu_stats
*p
;
355 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
360 for_each_possible_cpu(i
) {
361 p
= per_cpu_ptr(mlxsw_sx_port
->pcpu_stats
, i
);
363 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
364 rx_packets
= p
->rx_packets
;
365 rx_bytes
= p
->rx_bytes
;
366 tx_packets
= p
->tx_packets
;
367 tx_bytes
= p
->tx_bytes
;
368 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
370 stats
->rx_packets
+= rx_packets
;
371 stats
->rx_bytes
+= rx_bytes
;
372 stats
->tx_packets
+= tx_packets
;
373 stats
->tx_bytes
+= tx_bytes
;
374 /* tx_dropped is u32, updated without syncp protection. */
375 tx_dropped
+= p
->tx_dropped
;
377 stats
->tx_dropped
= tx_dropped
;
381 static const struct net_device_ops mlxsw_sx_port_netdev_ops
= {
382 .ndo_open
= mlxsw_sx_port_open
,
383 .ndo_stop
= mlxsw_sx_port_stop
,
384 .ndo_start_xmit
= mlxsw_sx_port_xmit
,
385 .ndo_change_mtu
= mlxsw_sx_port_change_mtu
,
386 .ndo_get_stats64
= mlxsw_sx_port_get_stats64
,
389 static void mlxsw_sx_port_get_drvinfo(struct net_device
*dev
,
390 struct ethtool_drvinfo
*drvinfo
)
392 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
393 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
395 strlcpy(drvinfo
->driver
, mlxsw_sx_driver_name
, sizeof(drvinfo
->driver
));
396 strlcpy(drvinfo
->version
, mlxsw_sx_driver_version
,
397 sizeof(drvinfo
->version
));
398 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
400 mlxsw_sx
->bus_info
->fw_rev
.major
,
401 mlxsw_sx
->bus_info
->fw_rev
.minor
,
402 mlxsw_sx
->bus_info
->fw_rev
.subminor
);
403 strlcpy(drvinfo
->bus_info
, mlxsw_sx
->bus_info
->device_name
,
404 sizeof(drvinfo
->bus_info
));
407 struct mlxsw_sx_port_hw_stats
{
408 char str
[ETH_GSTRING_LEN
];
409 u64 (*getter
)(char *payload
);
412 static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats
[] = {
414 .str
= "a_frames_transmitted_ok",
415 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
418 .str
= "a_frames_received_ok",
419 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
422 .str
= "a_frame_check_sequence_errors",
423 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
426 .str
= "a_alignment_errors",
427 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
430 .str
= "a_octets_transmitted_ok",
431 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
434 .str
= "a_octets_received_ok",
435 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
438 .str
= "a_multicast_frames_xmitted_ok",
439 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
442 .str
= "a_broadcast_frames_xmitted_ok",
443 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
446 .str
= "a_multicast_frames_received_ok",
447 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
450 .str
= "a_broadcast_frames_received_ok",
451 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
454 .str
= "a_in_range_length_errors",
455 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
458 .str
= "a_out_of_range_length_field",
459 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
462 .str
= "a_frame_too_long_errors",
463 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
466 .str
= "a_symbol_error_during_carrier",
467 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
470 .str
= "a_mac_control_frames_transmitted",
471 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
474 .str
= "a_mac_control_frames_received",
475 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
478 .str
= "a_unsupported_opcodes_received",
479 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
482 .str
= "a_pause_mac_ctrl_frames_received",
483 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
486 .str
= "a_pause_mac_ctrl_frames_xmitted",
487 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
491 #define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
493 static void mlxsw_sx_port_get_strings(struct net_device
*dev
,
494 u32 stringset
, u8
*data
)
501 for (i
= 0; i
< MLXSW_SX_PORT_HW_STATS_LEN
; i
++) {
502 memcpy(p
, mlxsw_sx_port_hw_stats
[i
].str
,
504 p
+= ETH_GSTRING_LEN
;
510 static void mlxsw_sx_port_get_stats(struct net_device
*dev
,
511 struct ethtool_stats
*stats
, u64
*data
)
513 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
514 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
515 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
519 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sx_port
->local_port
);
520 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
521 for (i
= 0; i
< MLXSW_SX_PORT_HW_STATS_LEN
; i
++)
522 data
[i
] = !err
? mlxsw_sx_port_hw_stats
[i
].getter(ppcnt_pl
) : 0;
525 static int mlxsw_sx_port_get_sset_count(struct net_device
*dev
, int sset
)
529 return MLXSW_SX_PORT_HW_STATS_LEN
;
535 struct mlxsw_sx_port_link_mode
{
542 static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode
[] = {
544 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
545 .supported
= SUPPORTED_100baseT_Full
,
546 .advertised
= ADVERTISED_100baseT_Full
,
550 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX
,
554 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
555 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
556 .supported
= SUPPORTED_1000baseKX_Full
,
557 .advertised
= ADVERTISED_1000baseKX_Full
,
561 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
562 .supported
= SUPPORTED_10000baseT_Full
,
563 .advertised
= ADVERTISED_10000baseT_Full
,
567 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
568 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
569 .supported
= SUPPORTED_10000baseKX4_Full
,
570 .advertised
= ADVERTISED_10000baseKX4_Full
,
574 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
575 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
576 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
577 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
578 .supported
= SUPPORTED_10000baseKR_Full
,
579 .advertised
= ADVERTISED_10000baseKR_Full
,
583 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
584 .supported
= SUPPORTED_20000baseKR2_Full
,
585 .advertised
= ADVERTISED_20000baseKR2_Full
,
589 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
590 .supported
= SUPPORTED_40000baseCR4_Full
,
591 .advertised
= ADVERTISED_40000baseCR4_Full
,
595 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
596 .supported
= SUPPORTED_40000baseKR4_Full
,
597 .advertised
= ADVERTISED_40000baseKR4_Full
,
601 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
602 .supported
= SUPPORTED_40000baseSR4_Full
,
603 .advertised
= ADVERTISED_40000baseSR4_Full
,
607 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
608 .supported
= SUPPORTED_40000baseLR4_Full
,
609 .advertised
= ADVERTISED_40000baseLR4_Full
,
613 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
|
614 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
|
615 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
619 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4
|
620 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
|
621 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
625 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
626 .supported
= SUPPORTED_56000baseKR4_Full
,
627 .advertised
= ADVERTISED_56000baseKR4_Full
,
631 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
|
632 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
633 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
634 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
639 #define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
641 static u32
mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto
)
643 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
644 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
645 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
646 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
647 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
648 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
649 return SUPPORTED_FIBRE
;
651 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
652 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
653 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
654 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
655 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
656 return SUPPORTED_Backplane
;
660 static u32
mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto
)
665 for (i
= 0; i
< MLXSW_SX_PORT_LINK_MODE_LEN
; i
++) {
666 if (ptys_eth_proto
& mlxsw_sx_port_link_mode
[i
].mask
)
667 modes
|= mlxsw_sx_port_link_mode
[i
].supported
;
672 static u32
mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto
)
677 for (i
= 0; i
< MLXSW_SX_PORT_LINK_MODE_LEN
; i
++) {
678 if (ptys_eth_proto
& mlxsw_sx_port_link_mode
[i
].mask
)
679 modes
|= mlxsw_sx_port_link_mode
[i
].advertised
;
684 static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok
, u32 ptys_eth_proto
,
685 struct ethtool_cmd
*cmd
)
687 u32 speed
= SPEED_UNKNOWN
;
688 u8 duplex
= DUPLEX_UNKNOWN
;
694 for (i
= 0; i
< MLXSW_SX_PORT_LINK_MODE_LEN
; i
++) {
695 if (ptys_eth_proto
& mlxsw_sx_port_link_mode
[i
].mask
) {
696 speed
= mlxsw_sx_port_link_mode
[i
].speed
;
697 duplex
= DUPLEX_FULL
;
702 ethtool_cmd_speed_set(cmd
, speed
);
703 cmd
->duplex
= duplex
;
706 static u8
mlxsw_sx_port_connector_port(u32 ptys_eth_proto
)
708 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
709 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
710 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
711 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
714 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
715 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
716 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
))
719 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
720 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
721 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
722 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
))
728 static int mlxsw_sx_port_get_settings(struct net_device
*dev
,
729 struct ethtool_cmd
*cmd
)
731 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
732 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
733 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
739 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sx_port
->local_port
, 0);
740 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(ptys
), ptys_pl
);
742 netdev_err(dev
, "Failed to get proto");
745 mlxsw_reg_ptys_unpack(ptys_pl
, ð_proto_cap
,
746 ð_proto_admin
, ð_proto_oper
);
748 cmd
->supported
= mlxsw_sx_from_ptys_supported_port(eth_proto_cap
) |
749 mlxsw_sx_from_ptys_supported_link(eth_proto_cap
) |
750 SUPPORTED_Pause
| SUPPORTED_Asym_Pause
;
751 cmd
->advertising
= mlxsw_sx_from_ptys_advert_link(eth_proto_admin
);
752 mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev
),
753 eth_proto_oper
, cmd
);
755 eth_proto_oper
= eth_proto_oper
? eth_proto_oper
: eth_proto_cap
;
756 cmd
->port
= mlxsw_sx_port_connector_port(eth_proto_oper
);
757 cmd
->lp_advertising
= mlxsw_sx_from_ptys_advert_link(eth_proto_oper
);
759 cmd
->transceiver
= XCVR_INTERNAL
;
763 static u32
mlxsw_sx_to_ptys_advert_link(u32 advertising
)
768 for (i
= 0; i
< MLXSW_SX_PORT_LINK_MODE_LEN
; i
++) {
769 if (advertising
& mlxsw_sx_port_link_mode
[i
].advertised
)
770 ptys_proto
|= mlxsw_sx_port_link_mode
[i
].mask
;
775 static u32
mlxsw_sx_to_ptys_speed(u32 speed
)
780 for (i
= 0; i
< MLXSW_SX_PORT_LINK_MODE_LEN
; i
++) {
781 if (speed
== mlxsw_sx_port_link_mode
[i
].speed
)
782 ptys_proto
|= mlxsw_sx_port_link_mode
[i
].mask
;
787 static int mlxsw_sx_port_set_settings(struct net_device
*dev
,
788 struct ethtool_cmd
*cmd
)
790 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
791 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
792 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
800 speed
= ethtool_cmd_speed(cmd
);
802 eth_proto_new
= cmd
->autoneg
== AUTONEG_ENABLE
?
803 mlxsw_sx_to_ptys_advert_link(cmd
->advertising
) :
804 mlxsw_sx_to_ptys_speed(speed
);
806 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sx_port
->local_port
, 0);
807 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(ptys
), ptys_pl
);
809 netdev_err(dev
, "Failed to get proto");
812 mlxsw_reg_ptys_unpack(ptys_pl
, ð_proto_cap
, ð_proto_admin
, NULL
);
814 eth_proto_new
= eth_proto_new
& eth_proto_cap
;
815 if (!eth_proto_new
) {
816 netdev_err(dev
, "Not supported proto admin requested");
819 if (eth_proto_new
== eth_proto_admin
)
822 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sx_port
->local_port
, eth_proto_new
);
823 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(ptys
), ptys_pl
);
825 netdev_err(dev
, "Failed to set proto admin");
829 err
= mlxsw_sx_port_oper_status_get(mlxsw_sx_port
, &is_up
);
831 netdev_err(dev
, "Failed to get oper status");
837 err
= mlxsw_sx_port_admin_status_set(mlxsw_sx_port
, false);
839 netdev_err(dev
, "Failed to set admin status");
843 err
= mlxsw_sx_port_admin_status_set(mlxsw_sx_port
, true);
845 netdev_err(dev
, "Failed to set admin status");
852 static const struct ethtool_ops mlxsw_sx_port_ethtool_ops
= {
853 .get_drvinfo
= mlxsw_sx_port_get_drvinfo
,
854 .get_link
= ethtool_op_get_link
,
855 .get_strings
= mlxsw_sx_port_get_strings
,
856 .get_ethtool_stats
= mlxsw_sx_port_get_stats
,
857 .get_sset_count
= mlxsw_sx_port_get_sset_count
,
858 .get_settings
= mlxsw_sx_port_get_settings
,
859 .set_settings
= mlxsw_sx_port_set_settings
,
862 static int mlxsw_sx_port_attr_get(struct net_device
*dev
,
863 struct switchdev_attr
*attr
)
865 struct mlxsw_sx_port
*mlxsw_sx_port
= netdev_priv(dev
);
866 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
869 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID
:
870 attr
->u
.ppid
.id_len
= sizeof(mlxsw_sx
->hw_id
);
871 memcpy(&attr
->u
.ppid
.id
, &mlxsw_sx
->hw_id
, attr
->u
.ppid
.id_len
);
880 static const struct switchdev_ops mlxsw_sx_port_switchdev_ops
= {
881 .switchdev_port_attr_get
= mlxsw_sx_port_attr_get
,
884 static int mlxsw_sx_hw_id_get(struct mlxsw_sx
*mlxsw_sx
)
886 char spad_pl
[MLXSW_REG_SPAD_LEN
];
889 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(spad
), spad_pl
);
892 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sx
->hw_id
);
896 static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port
*mlxsw_sx_port
)
898 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
899 struct net_device
*dev
= mlxsw_sx_port
->dev
;
900 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
903 mlxsw_reg_ppad_pack(ppad_pl
, false, 0);
904 err
= mlxsw_reg_query(mlxsw_sx
->core
, MLXSW_REG(ppad
), ppad_pl
);
907 mlxsw_reg_ppad_mac_memcpy_from(ppad_pl
, dev
->dev_addr
);
908 /* The last byte value in base mac address is guaranteed
909 * to be such it does not overflow when adding local_port
912 dev
->dev_addr
[ETH_ALEN
- 1] += mlxsw_sx_port
->local_port
;
916 static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port
*mlxsw_sx_port
,
917 u16 vid
, enum mlxsw_reg_spms_state state
)
919 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
923 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
926 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sx_port
->local_port
);
927 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, state
);
928 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(spms
), spms_pl
);
933 static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port
*mlxsw_sx_port
,
936 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
937 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
939 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sx_port
->local_port
, speed
);
940 return mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(ptys
), ptys_pl
);
944 mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port
*mlxsw_sx_port
,
945 enum mlxsw_reg_spmlr_learn_mode mode
)
947 struct mlxsw_sx
*mlxsw_sx
= mlxsw_sx_port
->mlxsw_sx
;
948 char spmlr_pl
[MLXSW_REG_SPMLR_LEN
];
950 mlxsw_reg_spmlr_pack(spmlr_pl
, mlxsw_sx_port
->local_port
, mode
);
951 return mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(spmlr
), spmlr_pl
);
954 static int mlxsw_sx_port_create(struct mlxsw_sx
*mlxsw_sx
, u8 local_port
)
956 struct mlxsw_sx_port
*mlxsw_sx_port
;
957 struct net_device
*dev
;
961 dev
= alloc_etherdev(sizeof(struct mlxsw_sx_port
));
964 mlxsw_sx_port
= netdev_priv(dev
);
965 mlxsw_sx_port
->dev
= dev
;
966 mlxsw_sx_port
->mlxsw_sx
= mlxsw_sx
;
967 mlxsw_sx_port
->local_port
= local_port
;
969 mlxsw_sx_port
->pcpu_stats
=
970 netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats
);
971 if (!mlxsw_sx_port
->pcpu_stats
) {
973 goto err_alloc_stats
;
976 dev
->netdev_ops
= &mlxsw_sx_port_netdev_ops
;
977 dev
->ethtool_ops
= &mlxsw_sx_port_ethtool_ops
;
978 dev
->switchdev_ops
= &mlxsw_sx_port_switchdev_ops
;
980 err
= mlxsw_sx_port_dev_addr_get(mlxsw_sx_port
);
982 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Unable to get port mac address\n",
983 mlxsw_sx_port
->local_port
);
984 goto err_dev_addr_get
;
987 netif_carrier_off(dev
);
989 dev
->features
|= NETIF_F_NETNS_LOCAL
| NETIF_F_LLTX
| NETIF_F_SG
|
990 NETIF_F_VLAN_CHALLENGED
;
992 /* Each packet needs to have a Tx header (metadata) on top all other
995 dev
->hard_header_len
+= MLXSW_TXHDR_LEN
;
997 err
= mlxsw_sx_port_module_check(mlxsw_sx_port
, &usable
);
999 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to check module\n",
1000 mlxsw_sx_port
->local_port
);
1001 goto err_port_module_check
;
1005 dev_dbg(mlxsw_sx
->bus_info
->dev
, "Port %d: Not usable, skipping initialization\n",
1006 mlxsw_sx_port
->local_port
);
1007 goto port_not_usable
;
1010 err
= mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port
);
1012 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to set system port mapping\n",
1013 mlxsw_sx_port
->local_port
);
1014 goto err_port_system_port_mapping_set
;
1017 err
= mlxsw_sx_port_swid_set(mlxsw_sx_port
, 0);
1019 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to set SWID\n",
1020 mlxsw_sx_port
->local_port
);
1021 goto err_port_swid_set
;
1024 err
= mlxsw_sx_port_speed_set(mlxsw_sx_port
,
1025 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
);
1027 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to set speed\n",
1028 mlxsw_sx_port
->local_port
);
1029 goto err_port_speed_set
;
1032 err
= mlxsw_sx_port_mtu_set(mlxsw_sx_port
, ETH_DATA_LEN
);
1034 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to set MTU\n",
1035 mlxsw_sx_port
->local_port
);
1036 goto err_port_mtu_set
;
1039 err
= mlxsw_sx_port_admin_status_set(mlxsw_sx_port
, false);
1041 goto err_port_admin_status_set
;
1043 err
= mlxsw_sx_port_stp_state_set(mlxsw_sx_port
,
1044 MLXSW_PORT_DEFAULT_VID
,
1045 MLXSW_REG_SPMS_STATE_FORWARDING
);
1047 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to set STP state\n",
1048 mlxsw_sx_port
->local_port
);
1049 goto err_port_stp_state_set
;
1052 err
= mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port
,
1053 MLXSW_REG_SPMLR_LEARN_MODE_DISABLE
);
1055 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to set MAC learning mode\n",
1056 mlxsw_sx_port
->local_port
);
1057 goto err_port_mac_learning_mode_set
;
1060 err
= register_netdev(dev
);
1062 dev_err(mlxsw_sx
->bus_info
->dev
, "Port %d: Failed to register netdev\n",
1063 mlxsw_sx_port
->local_port
);
1064 goto err_register_netdev
;
1067 mlxsw_sx
->ports
[local_port
] = mlxsw_sx_port
;
1070 err_register_netdev
:
1071 err_port_mac_learning_mode_set
:
1072 err_port_stp_state_set
:
1073 err_port_admin_status_set
:
1077 err_port_system_port_mapping_set
:
1079 err_port_module_check
:
1081 free_percpu(mlxsw_sx_port
->pcpu_stats
);
1087 static void mlxsw_sx_port_remove(struct mlxsw_sx
*mlxsw_sx
, u8 local_port
)
1089 struct mlxsw_sx_port
*mlxsw_sx_port
= mlxsw_sx
->ports
[local_port
];
1093 unregister_netdev(mlxsw_sx_port
->dev
); /* This calls ndo_stop */
1094 mlxsw_sx_port_swid_set(mlxsw_sx_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
1095 free_percpu(mlxsw_sx_port
->pcpu_stats
);
1096 free_netdev(mlxsw_sx_port
->dev
);
1099 static void mlxsw_sx_ports_remove(struct mlxsw_sx
*mlxsw_sx
)
1103 for (i
= 1; i
< MLXSW_PORT_MAX_PORTS
; i
++)
1104 mlxsw_sx_port_remove(mlxsw_sx
, i
);
1105 kfree(mlxsw_sx
->ports
);
1108 static int mlxsw_sx_ports_create(struct mlxsw_sx
*mlxsw_sx
)
1114 alloc_size
= sizeof(struct mlxsw_sx_port
*) * MLXSW_PORT_MAX_PORTS
;
1115 mlxsw_sx
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
1116 if (!mlxsw_sx
->ports
)
1119 for (i
= 1; i
< MLXSW_PORT_MAX_PORTS
; i
++) {
1120 err
= mlxsw_sx_port_create(mlxsw_sx
, i
);
1122 goto err_port_create
;
1127 for (i
--; i
>= 1; i
--)
1128 mlxsw_sx_port_remove(mlxsw_sx
, i
);
1129 kfree(mlxsw_sx
->ports
);
1133 static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info
*reg
,
1134 char *pude_pl
, void *priv
)
1136 struct mlxsw_sx
*mlxsw_sx
= priv
;
1137 struct mlxsw_sx_port
*mlxsw_sx_port
;
1138 enum mlxsw_reg_pude_oper_status status
;
1141 local_port
= mlxsw_reg_pude_local_port_get(pude_pl
);
1142 mlxsw_sx_port
= mlxsw_sx
->ports
[local_port
];
1143 if (!mlxsw_sx_port
) {
1144 dev_warn(mlxsw_sx
->bus_info
->dev
, "Port %d: Link event received for non-existent port\n",
1149 status
= mlxsw_reg_pude_oper_status_get(pude_pl
);
1150 if (status
== MLXSW_PORT_OPER_STATUS_UP
) {
1151 netdev_info(mlxsw_sx_port
->dev
, "link up\n");
1152 netif_carrier_on(mlxsw_sx_port
->dev
);
1154 netdev_info(mlxsw_sx_port
->dev
, "link down\n");
1155 netif_carrier_off(mlxsw_sx_port
->dev
);
1159 static struct mlxsw_event_listener mlxsw_sx_pude_event
= {
1160 .func
= mlxsw_sx_pude_event_func
,
1161 .trap_id
= MLXSW_TRAP_ID_PUDE
,
1164 static int mlxsw_sx_event_register(struct mlxsw_sx
*mlxsw_sx
,
1165 enum mlxsw_event_trap_id trap_id
)
1167 struct mlxsw_event_listener
*el
;
1168 char hpkt_pl
[MLXSW_REG_HPKT_LEN
];
1172 case MLXSW_TRAP_ID_PUDE
:
1173 el
= &mlxsw_sx_pude_event
;
1176 err
= mlxsw_core_event_listener_register(mlxsw_sx
->core
, el
, mlxsw_sx
);
1180 mlxsw_reg_hpkt_pack(hpkt_pl
, MLXSW_REG_HPKT_ACTION_FORWARD
, trap_id
);
1181 err
= mlxsw_reg_write(mlxsw_sx
->core
, MLXSW_REG(hpkt
), hpkt_pl
);
1183 goto err_event_trap_set
;
1188 mlxsw_core_event_listener_unregister(mlxsw_sx
->core
, el
, mlxsw_sx
);
1192 static void mlxsw_sx_event_unregister(struct mlxsw_sx
*mlxsw_sx
,
1193 enum mlxsw_event_trap_id trap_id
)
1195 struct mlxsw_event_listener
*el
;
1198 case MLXSW_TRAP_ID_PUDE
:
1199 el
= &mlxsw_sx_pude_event
;
1202 mlxsw_core_event_listener_unregister(mlxsw_sx
->core
, el
, mlxsw_sx
);
/* RX handler for packets trapped to the CPU: attribute the skb to the
 * receiving port's net_device, bump per-CPU RX counters and hand the
 * packet to the network stack.
 */
static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;

	/* Ratelimited: a flood of traps for a stale port must not spam
	 * the log.
	 */
	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	/* Update stats inside the u64_stats seqcount so 64-bit counters
	 * read consistently on 32-bit machines.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
/* Packet traps delivered to the CPU; every entry shares the same RX
 * handler and matches on trap ID only (any local port).
 */
static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sx_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
/* Set up packet trapping: configure the RX and CTRL trap groups (HTGT),
 * then register every listener in mlxsw_sx_rx_listener[] and program
 * its trap action to TRAP_TO_CPU (HPKT). On failure, already-configured
 * traps are reverted to FORWARD and their listeners unregistered.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
						      &mlxsw_sx_rx_listener[i],
						      mlxsw_sx);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sx_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i registered but its HPKT write failed; drop the
	 * listener here, then unwind entries 0..i-1 below.
	 */
	mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
					  &mlxsw_sx_rx_listener[i],
					  mlxsw_sx);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sx_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
						  &mlxsw_sx_rx_listener[i],
						  mlxsw_sx);
	}
	return err;
}
/* Tear down packet trapping: restore every trap action to FORWARD and
 * unregister the matching RX listener. Mirrors mlxsw_sx_traps_init().
 */
static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sx_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
						  &mlxsw_sx_rx_listener[i],
						  mlxsw_sx);
	}
}
/* Initialize flooding: build a flooding table containing only the CPU
 * port (SFTR), bind the flood-prone packet classes to it (SFGC), and
 * finally enable flooding globally (SGCR).
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
{
	char sfgc_pl[MLXSW_REG_SFGC_LEN];
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	char *sftr_pl;
	int err;

	/* Configure a flooding table, which includes only CPU port. */
	/* SFTR payload is large; allocate it from the heap rather than
	 * the stack.
	 */
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;
	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
			    MLXSW_PORT_CPU_PORT, true);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
	kfree(sftr_pl);
	if (err)
		return err;

	/* Flood different packet types using the flooding table. */
	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_BROADCAST,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sfgc_pack(sfgc_pl,
			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
			    0);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
	if (err)
		return err;

	mlxsw_reg_sgcr_pack(sgcr_pl, true);
	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
}
/* Driver init callback invoked by mlxsw core after the bus comes up:
 * read the switch HW ID, create ports, register for PUDE link events,
 * enable packet traps and set up flood tables. Uses goto-based unwind
 * so a failure at any stage undoes all earlier stages.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_rx_listener_register:
	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}
/* Driver fini callback: tear down in reverse order of mlxsw_sx_init(). */
static void mlxsw_sx_fini(void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
/* Device configuration profile passed to the core at probe time.
 * Each used_* flag tells the firmware whether the paired value is
 * valid; resources not used by this driver are left at their firmware
 * defaults.
 */
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= 64,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= 16,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 48000,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.max_flood_tables		= 2,
	.max_vid_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
/* mlxsw core driver registration for the SwitchX-2 ASIC: lifecycle
 * callbacks, TX header construction and the configuration profile.
 */
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind			= MLXSW_DEVICE_KIND_SWITCHX2,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sx),
	.init			= mlxsw_sx_init,
	.fini			= mlxsw_sx_fini,
	.txhdr_construct	= mlxsw_sx_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sx_config_profile,
};
/* Module entry point: register this driver with the mlxsw core. */
static int __init mlxsw_sx_module_init(void)
{
	return mlxsw_core_driver_register(&mlxsw_sx_driver);
}
/* Module exit point: unregister this driver from the mlxsw core. */
static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}
module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

/* Dual-licensed per the header: BSD or GPLv2. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
/* Alias so udev/modprobe can autoload by mlxsw device kind. */
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);