Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
[cris-mirror.git] / drivers / net / ethernet / mellanox / mlxsw / switchx2.c
blobf3c29bbf07e22e245492e7d456d89686770061d4
1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <net/switchdev.h>
49 #include "pci.h"
50 #include "core.h"
51 #include "reg.h"
52 #include "port.h"
53 #include "trap.h"
54 #include "txheader.h"
55 #include "ib.h"
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;

/* Driver-private state for one SwitchX-2 ASIC instance. */
struct mlxsw_sx {
	struct mlxsw_sx_port **ports;	/* indexed by local port number */
	struct mlxsw_core *core;	/* handle to the common mlxsw core */
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];		/* switch base MAC, used as switch ID */
};

/* Per-CPU software-maintained packet/byte counters for one port. */
struct mlxsw_sx_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;	/* protects the u64 counters above */
	u32 tx_dropped;			/* u32, updated without syncp */
};

/* Driver-private state for one front-panel port (netdev_priv of dev). */
struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;	/* back-pointer to parent switch */
	u8 local_port;
	struct {
		u8 module;		/* front-panel module (cage) index */
	} mapping;
};
/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
/* Prepend and fill the SwitchX-2 Tx header (metadata consumed by the device)
 * on @skb. All packets are sent as control packets directed at
 * tx_info->local_port; EMADs additionally get a dedicated egress tclass
 * and RDQ. Caller must guarantee MLXSW_TXHDR_LEN bytes of headroom.
 */
static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	bool is_emad = tx_info->is_emad;

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	/* We currently set default values for the egress tclass (QoS). */
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
						  MLXSW_TXHDR_ETCLASS_5);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
					      MLXSW_TXHDR_RDQ_OTHER);
	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
					       MLXSW_TXHDR_NOT_EMAD);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
192 static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
193 bool is_up)
195 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
196 char paos_pl[MLXSW_REG_PAOS_LEN];
198 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
199 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
200 MLXSW_PORT_ADMIN_STATUS_DOWN);
201 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
204 static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
205 bool *p_is_up)
207 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
208 char paos_pl[MLXSW_REG_PAOS_LEN];
209 u8 oper_status;
210 int err;
212 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
213 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
214 if (err)
215 return err;
216 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
217 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
218 return 0;
221 static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
222 u16 mtu)
224 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
225 char pmtu_pl[MLXSW_REG_PMTU_LEN];
226 int max_mtu;
227 int err;
229 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
230 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
231 if (err)
232 return err;
233 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
235 if (mtu > max_mtu)
236 return -EINVAL;
238 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
239 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
242 static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
243 u16 mtu)
245 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
246 return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
249 static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
250 u16 mtu)
252 return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
255 static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
256 u8 ib_port)
258 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
259 char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
260 int err;
262 mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
263 mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
264 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
265 return err;
268 static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
270 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
271 char pspa_pl[MLXSW_REG_PSPA_LEN];
273 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
274 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
277 static int
278 mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
280 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
281 char sspr_pl[MLXSW_REG_SSPR_LEN];
283 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
284 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
/* Read PMLP to discover which front-panel module (cage) @local_port is
 * mapped to and how many lanes (width) the mapping uses.
 * Returns 0 on success or a negative errno on register access failure.
 */
static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	/* Only the first lane's module number is needed here. */
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	return 0;
}
303 static int mlxsw_sx_port_open(struct net_device *dev)
305 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
306 int err;
308 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
309 if (err)
310 return err;
311 netif_start_queue(dev);
312 return 0;
315 static int mlxsw_sx_port_stop(struct net_device *dev)
317 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
319 netif_stop_queue(dev);
320 return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
/* ndo_start_xmit: prepend the Tx header and hand the skb to the mlxsw core.
 * On any non-busy outcome the skb is consumed (freed or transmitted), so
 * NETDEV_TX_OK is returned; NETDEV_TX_BUSY is returned only when the core
 * Tx path is momentarily full and the skb is left untouched for requeue.
 */
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Ensure there is room for the Tx header; reallocate if headroom
	 * is too short (the original skb is then released).
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
373 static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
375 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
376 int err;
378 err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
379 if (err)
380 return err;
381 dev->mtu = mtu;
382 return 0;
/* ndo_get_stats64: aggregate the per-CPU software counters into @stats. */
static void
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			/* Retry the snapshot if the Tx/Rx path updated the
			 * 64-bit counters while we were reading them.
			 */
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
}
416 static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
417 size_t len)
419 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
420 int err;
422 err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
423 if (err >= len)
424 return -EINVAL;
426 return 0;
/* Network device operations for a SwitchX-2 Ethernet port. */
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open = mlxsw_sx_port_open,
	.ndo_stop = mlxsw_sx_port_stop,
	.ndo_start_xmit = mlxsw_sx_port_xmit,
	.ndo_change_mtu = mlxsw_sx_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
	.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
};
/* ethtool .get_drvinfo: report driver name/version, firmware revision
 * (from the bus info) and the underlying bus device name.
 */
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
/* Pairing of an ethtool stat name with the PPCNT register field getter
 * that extracts its value from the register payload.
 */
struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

/* IEEE 802.3 counter group exposed via ethtool -S. */
static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
542 static void mlxsw_sx_port_get_strings(struct net_device *dev,
543 u32 stringset, u8 *data)
545 u8 *p = data;
546 int i;
548 switch (stringset) {
549 case ETH_SS_STATS:
550 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
551 memcpy(p, mlxsw_sx_port_hw_stats[i].str,
552 ETH_GSTRING_LEN);
553 p += ETH_GSTRING_LEN;
555 break;
/* ethtool .get_ethtool_stats: read the IEEE 802.3 counter group via a
 * single PPCNT query and extract each counter with its getter. If the
 * register query fails, every counter is reported as zero.
 */
static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
}
575 static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
577 switch (sset) {
578 case ETH_SS_STATS:
579 return MLXSW_SX_PORT_HW_STATS_LEN;
580 default:
581 return -EOPNOTSUPP;
/* One row per PTYS speed capability (a mask may group several PTYS bits
 * that map to the same ethtool mode). 'supported'/'advertised' hold the
 * legacy ethtool flags, left zero where ethtool has no equivalent mode;
 * 'speed' is in Mb/s.
 */
struct mlxsw_sx_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		/* No legacy ethtool flags exist for 25G modes. */
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		/* No legacy ethtool flags exist for 50G modes. */
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported = SUPPORTED_56000baseKR4_Full,
		.advertised = ADVERTISED_56000baseKR4_Full,
		.speed = 56000,
	},
	{
		/* No legacy ethtool flags exist for 100G modes. */
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
/* Derive the legacy ethtool SUPPORTED_* port-type flag (fibre vs.
 * backplane) from the PTYS Ethernet capability mask; 0 if neither class
 * of modes is present.
 */
static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}
711 static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
713 u32 modes = 0;
714 int i;
716 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
717 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
718 modes |= mlxsw_sx_port_link_mode[i].supported;
720 return modes;
723 static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
725 u32 modes = 0;
726 int i;
728 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
729 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
730 modes |= mlxsw_sx_port_link_mode[i].advertised;
732 return modes;
735 static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
736 struct ethtool_link_ksettings *cmd)
738 u32 speed = SPEED_UNKNOWN;
739 u8 duplex = DUPLEX_UNKNOWN;
740 int i;
742 if (!carrier_ok)
743 goto out;
745 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
746 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
747 speed = mlxsw_sx_port_link_mode[i].speed;
748 duplex = DUPLEX_FULL;
749 break;
752 out:
753 cmd->base.speed = speed;
754 cmd->base.duplex = duplex;
/* Derive the ethtool PORT_* connector type from the PTYS mode mask:
 * SR/SGMII modes -> fibre, CR modes -> direct attach copper,
 * KR/KX modes -> backplane (no connector), anything else -> other.
 */
static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
/* ethtool .get_link_ksettings: query PTYS once and translate the
 * capability, admin (advertised) and operational masks into ethtool
 * link modes, speed/duplex and connector type.
 */
static int
mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
				 struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	u32 supported, advertising, lp_advertising;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
				  &eth_proto_admin, &eth_proto_oper);

	supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
		    mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
		    SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational modes (e.g. link down) fall back to the
	 * capability mask for connector/lp_advertising reporting.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
	lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						lp_advertising);

	return 0;
}
822 static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
824 u32 ptys_proto = 0;
825 int i;
827 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
828 if (advertising & mlxsw_sx_port_link_mode[i].advertised)
829 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
831 return ptys_proto;
834 static u32 mlxsw_sx_to_ptys_speed(u32 speed)
836 u32 ptys_proto = 0;
837 int i;
839 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
840 if (speed == mlxsw_sx_port_link_mode[i].speed)
841 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
843 return ptys_proto;
846 static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
848 u32 ptys_proto = 0;
849 int i;
851 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
852 if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
853 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
855 return ptys_proto;
/* ethtool .set_link_ksettings: program the new admin proto mask via PTYS.
 * With autoneg enabled the advertised modes are used; otherwise the mask
 * is derived from the forced speed. If the port is operationally up, it
 * is toggled down/up so the new setting takes effect.
 */
static int
mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 advertising;
	bool is_up;
	int err;

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  NULL);

	/* Restrict the request to what the port can actually do. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	/* Toggle the port so the new proto admin takes effect. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
/* ethtool operations for a SwitchX-2 Ethernet port. */
static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sx_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlxsw_sx_port_get_strings,
	.get_ethtool_stats = mlxsw_sx_port_get_stats,
	.get_sset_count = mlxsw_sx_port_get_sset_count,
	.get_link_ksettings = mlxsw_sx_port_get_link_ksettings,
	.set_link_ksettings = mlxsw_sx_port_set_link_ksettings,
};
/* switchdev attr getter: report the parent switch ID (the switch base
 * MAC) so user space can group ports belonging to the same ASIC.
 */
static int mlxsw_sx_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
	.switchdev_port_attr_get = mlxsw_sx_port_attr_get,
};
962 static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
964 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
965 int err;
967 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
968 if (err)
969 return err;
970 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
971 return 0;
/* Derive the port's MAC address: the switch base MAC (read via PPAD)
 * with the local port number added to its last byte.
 */
static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in base mac address is guaranteed
	 * to be such it does not overflow when adding local_port
	 * value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}
/* Set the STP state of VLAN @vid on the port via the SPMS register.
 * The register payload is heap-allocated (NOTE(review): presumably SPMS
 * is too large for the stack — confirm against MLXSW_REG_SPMS_LEN).
 * Returns 0 on success, -ENOMEM or a register-access errno on failure.
 */
static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
1011 static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
1012 u16 speed, u16 width)
1014 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1015 char ptys_pl[MLXSW_REG_PTYS_LEN];
1017 mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
1018 width);
1019 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
1022 static int
1023 mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
1025 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1026 u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
1027 char ptys_pl[MLXSW_REG_PTYS_LEN];
1028 u32 eth_proto_admin;
1030 eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
1031 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
1032 eth_proto_admin);
1033 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
1036 static int
1037 mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
1038 enum mlxsw_reg_spmlr_learn_mode mode)
1040 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1041 char spmlr_pl[MLXSW_REG_SPMLR_LEN];
1043 mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
1044 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
/* Create and register a netdev-backed Ethernet port.
 *
 * Bring-up order matters: the netdev is registered only after all
 * device-side configuration (system port mapping, SWID, speed, MTU,
 * STP state, learning mode) succeeded, so ndo callbacks never run on a
 * half-configured port. Failures unwind through the goto ladder in
 * reverse order of setup.
 */
static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				      u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	/* Per-CPU counters, updated locklessly from the RX path. */
	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;

	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	/* Carrier stays off until a PUDE link-up event arrives. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* SWID 0 is the Ethernet switch partition. */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	/* Port starts administratively down; ndo_open brings it up. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
				mlxsw_sx_port, dev, false, 0);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
1172 static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1173 u8 module, u8 width)
1175 int err;
1177 err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
1178 if (err) {
1179 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
1180 local_port);
1181 return err;
1183 err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
1184 if (err)
1185 goto err_port_create;
1187 return 0;
1189 err_port_create:
1190 mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1191 return err;
1194 static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1196 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1198 mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1199 unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
1200 mlxsw_sx->ports[local_port] = NULL;
1201 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1202 free_percpu(mlxsw_sx_port->pcpu_stats);
1203 free_netdev(mlxsw_sx_port->dev);
1206 static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1208 return mlxsw_sx->ports[local_port] != NULL;
/* Create an InfiniBand port. Unlike the Ethernet flavor there is no
 * netdev; the port structure is plain kzalloc'd memory and the SMA
 * (subnet management agent) owns the active link parameters.
 * Failures unwind through the goto ladder in reverse order of setup.
 */
static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				     u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	int err;

	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
	if (!mlxsw_sx_port)
		return -ENOMEM;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* Adding port to Infiniband swid (1) */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	/* Expose the IB port number as it's front panel name */
	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
			mlxsw_sx_port->local_port);
		goto err_port_ib_set;
	}

	/* Supports all speeds from SDR to FDR (bitmask) and support bus width
	 * of 1x, 2x and 4x (3 bits bitmask)
	 */
	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
					 BIT(3) - 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	/* Change to the maximum MTU the device supports, the SMA will take
	 * care of the active MTU
	 */
	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	/* IB ports come up administratively enabled, unlike Ethernet. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
			mlxsw_sx_port->local_port);
		goto err_port_admin_set;
	}

	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
			       mlxsw_sx_port);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_port_admin_set:
err_port_mtu_set:
err_port_speed_set:
err_port_ib_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
	kfree(mlxsw_sx_port);
	return err;
}
1292 static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1294 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1296 mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
1297 mlxsw_sx->ports[local_port] = NULL;
1298 mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
1299 mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
1300 kfree(mlxsw_sx_port);
1303 static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1305 enum devlink_port_type port_type =
1306 mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1308 if (port_type == DEVLINK_PORT_TYPE_ETH)
1309 __mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
1310 else if (port_type == DEVLINK_PORT_TYPE_IB)
1311 __mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
1314 static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1316 __mlxsw_sx_port_remove(mlxsw_sx, local_port);
1317 mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1320 static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1322 int i;
1324 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
1325 if (mlxsw_sx_port_created(mlxsw_sx, i))
1326 mlxsw_sx_port_remove(mlxsw_sx, i);
1327 kfree(mlxsw_sx->ports);
1330 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1332 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
1333 size_t alloc_size;
1334 u8 module, width;
1335 int i;
1336 int err;
1338 alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
1339 mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1340 if (!mlxsw_sx->ports)
1341 return -ENOMEM;
1343 for (i = 1; i < max_ports; i++) {
1344 err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
1345 &width);
1346 if (err)
1347 goto err_port_module_info_get;
1348 if (!width)
1349 continue;
1350 err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
1351 if (err)
1352 goto err_port_create;
1354 return 0;
1356 err_port_create:
1357 err_port_module_info_get:
1358 for (i--; i >= 1; i--)
1359 if (mlxsw_sx_port_created(mlxsw_sx, i))
1360 mlxsw_sx_port_remove(mlxsw_sx, i);
1361 kfree(mlxsw_sx->ports);
1362 return err;
1365 static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1366 enum mlxsw_reg_pude_oper_status status)
1368 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1369 netdev_info(mlxsw_sx_port->dev, "link up\n");
1370 netif_carrier_on(mlxsw_sx_port->dev);
1371 } else {
1372 netdev_info(mlxsw_sx_port->dev, "link down\n");
1373 netif_carrier_off(mlxsw_sx_port->dev);
1377 static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1378 enum mlxsw_reg_pude_oper_status status)
1380 if (status == MLXSW_PORT_OPER_STATUS_UP)
1381 pr_info("ib link for port %d - up\n",
1382 mlxsw_sx_port->mapping.module + 1);
1383 else
1384 pr_info("ib link for port %d - down\n",
1385 mlxsw_sx_port->mapping.module + 1);
1388 static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1389 char *pude_pl, void *priv)
1391 struct mlxsw_sx *mlxsw_sx = priv;
1392 struct mlxsw_sx_port *mlxsw_sx_port;
1393 enum mlxsw_reg_pude_oper_status status;
1394 enum devlink_port_type port_type;
1395 u8 local_port;
1397 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1398 mlxsw_sx_port = mlxsw_sx->ports[local_port];
1399 if (!mlxsw_sx_port) {
1400 dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1401 local_port);
1402 return;
1405 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1406 port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1407 if (port_type == DEVLINK_PORT_TYPE_ETH)
1408 mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
1409 else if (port_type == DEVLINK_PORT_TYPE_IB)
1410 mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
1413 static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
1414 void *priv)
1416 struct mlxsw_sx *mlxsw_sx = priv;
1417 struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
1418 struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
1420 if (unlikely(!mlxsw_sx_port)) {
1421 dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
1422 local_port);
1423 return;
1426 skb->dev = mlxsw_sx_port->dev;
1428 pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
1429 u64_stats_update_begin(&pcpu_stats->syncp);
1430 pcpu_stats->rx_packets++;
1431 pcpu_stats->rx_bytes += skb->len;
1432 u64_stats_update_end(&pcpu_stats->syncp);
1434 skb->protocol = eth_type_trans(skb, skb->dev);
1435 netif_receive_skb(skb);
1438 static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1439 enum devlink_port_type new_type)
1441 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1442 u8 module, width;
1443 int err;
1445 if (new_type == DEVLINK_PORT_TYPE_AUTO)
1446 return -EOPNOTSUPP;
1448 __mlxsw_sx_port_remove(mlxsw_sx, local_port);
1449 err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
1450 &width);
1451 if (err)
1452 goto err_port_module_info_get;
1454 if (new_type == DEVLINK_PORT_TYPE_ETH)
1455 err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
1456 width);
1457 else if (new_type == DEVLINK_PORT_TYPE_IB)
1458 err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
1459 width);
1461 err_port_module_info_get:
1462 return err;
/* All control-plane traps below are delivered to the SX2_RX trap group
 * and forwarded (TRAP_TO_CPU with FORWARD action) by
 * mlxsw_sx_rx_listener_func.
 */
#define MLXSW_SX_RXL(_trap_id) \
	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU,	\
		  false, SX2_RX, FORWARD)

static const struct mlxsw_listener mlxsw_sx_listener[] = {
	/* Port up/down events, handled on the EMAD path. */
	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
	/* L2 control protocols trapped to the CPU. */
	MLXSW_SX_RXL(FDB_MC),
	MLXSW_SX_RXL(STP),
	MLXSW_SX_RXL(LACP),
	MLXSW_SX_RXL(EAPOL),
	MLXSW_SX_RXL(LLDP),
	MLXSW_SX_RXL(MMRP),
	MLXSW_SX_RXL(MVRP),
	MLXSW_SX_RXL(RPVST),
	MLXSW_SX_RXL(DHCP),
	/* IGMP snooping related traps. */
	MLXSW_SX_RXL(IGMP_QUERY),
	MLXSW_SX_RXL(IGMP_V1_REPORT),
	MLXSW_SX_RXL(IGMP_V2_REPORT),
	MLXSW_SX_RXL(IGMP_V2_LEAVE),
	MLXSW_SX_RXL(IGMP_V3_REPORT),
};
/* Configure the RX and CTRL trap groups (each mapped to its own RDQ)
 * and register every listener in mlxsw_sx_listener[]. On a listener
 * registration failure, previously registered listeners are
 * unregistered in reverse order.
 */
static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);

	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sx->core,
					       &mlxsw_sx_listener[i],
					       mlxsw_sx);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	/* Unwind only the listeners registered so far. */
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sx->core,
					   &mlxsw_sx_listener[i],
					   mlxsw_sx);
	}
	return err;
}
1534 static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
1536 int i;
1538 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1539 mlxsw_core_trap_unregister(mlxsw_sx->core,
1540 &mlxsw_sx_listener[i],
1541 mlxsw_sx);
1545 static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1547 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1548 char sgcr_pl[MLXSW_REG_SGCR_LEN];
1549 char *sftr_pl;
1550 int err;
1552 /* Configure a flooding table, which includes only CPU port. */
1553 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1554 if (!sftr_pl)
1555 return -ENOMEM;
1556 mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1557 MLXSW_PORT_CPU_PORT, true);
1558 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1559 kfree(sftr_pl);
1560 if (err)
1561 return err;
1563 /* Flood different packet types using the flooding table. */
1564 mlxsw_reg_sfgc_pack(sfgc_pl,
1565 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1566 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1567 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1569 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1570 if (err)
1571 return err;
1573 mlxsw_reg_sfgc_pack(sfgc_pl,
1574 MLXSW_REG_SFGC_TYPE_BROADCAST,
1575 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1576 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1578 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1579 if (err)
1580 return err;
1582 mlxsw_reg_sfgc_pack(sfgc_pl,
1583 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1584 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1585 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1587 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1588 if (err)
1589 return err;
1591 mlxsw_reg_sfgc_pack(sfgc_pl,
1592 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1593 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1594 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1596 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1597 if (err)
1598 return err;
1600 mlxsw_reg_sfgc_pack(sfgc_pl,
1601 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1602 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1603 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1605 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1606 if (err)
1607 return err;
1609 mlxsw_reg_sgcr_pack(sgcr_pl, true);
1610 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1613 static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
1615 char htgt_pl[MLXSW_REG_HTGT_LEN];
1617 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
1618 MLXSW_REG_HTGT_INVALID_POLICER,
1619 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1620 MLXSW_REG_HTGT_DEFAULT_TC);
1621 mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
1622 mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1623 MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
1624 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
/* Driver init callback: read the switch HW ID, create the ports,
 * install traps and flooding. Teardown on failure mirrors
 * mlxsw_sx_fini() in reverse order.
 */
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
		goto err_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_listener_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}
/* Driver fini callback: undo mlxsw_sx_init() in reverse order. */
static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
/* Device configuration profile handed to the core during init.
 * "used_*" flags mark which fields of the profile are valid.
 */
static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 48000,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.max_flood_tables		= 2,
	.max_vid_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 6,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* Two switch partitions: SWID 0 for Ethernet, SWID 1 for IB. */
	.swid_config = {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		},
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_IB,
		}
	},
	.resource_query_enable		= 0,
};
/* Core driver registration descriptor for the SwitchX-2 ASIC. */
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind			= mlxsw_sx_driver_name,
	.priv_size		= sizeof(struct mlxsw_sx),
	.init			= mlxsw_sx_init,
	.fini			= mlxsw_sx_fini,
	.basic_trap_groups_set	= mlxsw_sx_basic_trap_groups_set,
	.txhdr_construct	= mlxsw_sx_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sx_config_profile,
	.port_type_set		= mlxsw_sx_port_type_set,
};
/* PCI IDs served by this driver; probe/remove are filled in by
 * mlxsw_pci_driver_register().
 */
static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, },
};

static struct pci_driver mlxsw_sx_pci_driver = {
	.name = mlxsw_sx_driver_name,
	.id_table = mlxsw_sx_pci_id_table,
};
1734 static int __init mlxsw_sx_module_init(void)
1736 int err;
1738 err = mlxsw_core_driver_register(&mlxsw_sx_driver);
1739 if (err)
1740 return err;
1742 err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
1743 if (err)
1744 goto err_pci_driver_register;
1746 return 0;
1748 err_pci_driver_register:
1749 mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1750 return err;
1753 static void __exit mlxsw_sx_module_exit(void)
1755 mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
1756 mlxsw_core_driver_unregister(&mlxsw_sx_driver);
module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

/* Dual-licensed, matching the BSD/GPL header at the top of the file. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);