2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <net/switchdev.h>
52 #include <generated/utsrelease.h>
/* Driver identity strings reported via ethtool -i (see
 * mlxsw_sp_port_get_drvinfo() below).
 */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
68 MLXSW_ITEM32(tx
, hdr
, version
, 0x00, 28, 4);
71 * Packet control type.
72 * 0 - Ethernet control (e.g. EMADs, LACP)
75 MLXSW_ITEM32(tx
, hdr
, ctl
, 0x00, 26, 2);
78 * Packet protocol type. Must be set to 1 (Ethernet).
80 MLXSW_ITEM32(tx
, hdr
, proto
, 0x00, 21, 3);
82 /* tx_hdr_rx_is_router
83 * Packet is sent from the router. Valid for data packets only.
85 MLXSW_ITEM32(tx
, hdr
, rx_is_router
, 0x00, 19, 1);
88 * Indicates if the 'fid' field is valid and should be used for
89 * forwarding lookup. Valid for data packets only.
91 MLXSW_ITEM32(tx
, hdr
, fid_valid
, 0x00, 16, 1);
94 * Switch partition ID. Must be set to 0.
96 MLXSW_ITEM32(tx
, hdr
, swid
, 0x00, 12, 3);
98 /* tx_hdr_control_tclass
99 * Indicates if the packet should use the control TClass and not one
100 * of the data TClasses.
102 MLXSW_ITEM32(tx
, hdr
, control_tclass
, 0x00, 6, 1);
105 * Egress TClass to be used on the egress device on the egress port.
107 MLXSW_ITEM32(tx
, hdr
, etclass
, 0x00, 0, 4);
110 * Destination local port for unicast packets.
111 * Destination multicast ID for multicast packets.
113 * Control packets are directed to a specific egress port, while data
114 * packets are transmitted through the CPU port (0) into the switch partition,
115 * where forwarding rules are applied.
117 MLXSW_ITEM32(tx
, hdr
, port_mid
, 0x04, 16, 16);
120 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
121 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
122 * Valid for data packets only.
124 MLXSW_ITEM32(tx
, hdr
, fid
, 0x08, 0, 16);
128 * 6 - Control packets
130 MLXSW_ITEM32(tx
, hdr
, type
, 0x0C, 0, 4);
132 static void mlxsw_sp_txhdr_construct(struct sk_buff
*skb
,
133 const struct mlxsw_tx_info
*tx_info
)
135 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
137 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
139 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_1
);
140 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
141 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
142 mlxsw_tx_hdr_swid_set(txhdr
, 0);
143 mlxsw_tx_hdr_control_tclass_set(txhdr
, 1);
144 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
145 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
148 static int mlxsw_sp_base_mac_get(struct mlxsw_sp
*mlxsw_sp
)
150 char spad_pl
[MLXSW_REG_SPAD_LEN
];
153 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(spad
), spad_pl
);
156 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sp
->base_mac
);
160 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
163 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
164 char paos_pl
[MLXSW_REG_PAOS_LEN
];
166 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
,
167 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
168 MLXSW_PORT_ADMIN_STATUS_DOWN
);
169 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
172 static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port
*mlxsw_sp_port
,
175 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
176 char paos_pl
[MLXSW_REG_PAOS_LEN
];
180 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
, 0);
181 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
184 oper_status
= mlxsw_reg_paos_oper_status_get(paos_pl
);
185 *p_is_up
= oper_status
== MLXSW_PORT_ADMIN_STATUS_UP
? true : false;
189 static int mlxsw_sp_vfid_create(struct mlxsw_sp
*mlxsw_sp
, u16 vfid
)
191 char sfmr_pl
[MLXSW_REG_SFMR_LEN
];
194 mlxsw_reg_sfmr_pack(sfmr_pl
, MLXSW_REG_SFMR_OP_CREATE_FID
,
195 MLXSW_SP_VFID_BASE
+ vfid
, 0);
196 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfmr
), sfmr_pl
);
201 set_bit(vfid
, mlxsw_sp
->active_vfids
);
205 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp
*mlxsw_sp
, u16 vfid
)
207 char sfmr_pl
[MLXSW_REG_SFMR_LEN
];
209 clear_bit(vfid
, mlxsw_sp
->active_vfids
);
211 mlxsw_reg_sfmr_pack(sfmr_pl
, MLXSW_REG_SFMR_OP_DESTROY_FID
,
212 MLXSW_SP_VFID_BASE
+ vfid
, 0);
213 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfmr
), sfmr_pl
);
216 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
219 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
220 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
222 mlxsw_reg_ppad_pack(ppad_pl
, true, mlxsw_sp_port
->local_port
);
223 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl
, addr
);
224 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ppad
), ppad_pl
);
227 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
229 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
230 unsigned char *addr
= mlxsw_sp_port
->dev
->dev_addr
;
232 ether_addr_copy(addr
, mlxsw_sp
->base_mac
);
233 addr
[ETH_ALEN
- 1] += mlxsw_sp_port
->local_port
;
234 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
);
237 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
238 u16 vid
, enum mlxsw_reg_spms_state state
)
240 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
244 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
247 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
248 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, state
);
249 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
254 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mtu
)
256 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
257 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
261 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
262 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, 0);
263 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
266 max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
271 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, mtu
);
272 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
275 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 swid
)
277 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
278 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
280 mlxsw_reg_pspa_pack(pspa_pl
, swid
, mlxsw_sp_port
->local_port
);
281 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pspa
), pspa_pl
);
284 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
287 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
288 char svpe_pl
[MLXSW_REG_SVPE_LEN
];
290 mlxsw_reg_svpe_pack(svpe_pl
, mlxsw_sp_port
->local_port
, enable
);
291 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svpe
), svpe_pl
);
294 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
295 enum mlxsw_reg_svfa_mt mt
, bool valid
, u16 fid
,
298 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
299 char svfa_pl
[MLXSW_REG_SVFA_LEN
];
301 mlxsw_reg_svfa_pack(svfa_pl
, mlxsw_sp_port
->local_port
, mt
, valid
,
303 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svfa
), svfa_pl
);
306 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
307 u16 vid
, bool learn_enable
)
309 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
313 spvmlr_pl
= kmalloc(MLXSW_REG_SPVMLR_LEN
, GFP_KERNEL
);
316 mlxsw_reg_spvmlr_pack(spvmlr_pl
, mlxsw_sp_port
->local_port
, vid
, vid
,
318 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvmlr
), spvmlr_pl
);
324 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
326 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
327 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
329 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sp_port
->local_port
);
330 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sspr
), sspr_pl
);
333 static int mlxsw_sp_port_module_check(struct mlxsw_sp_port
*mlxsw_sp_port
,
336 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
337 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
340 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sp_port
->local_port
);
341 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
344 *p_usable
= mlxsw_reg_pmlp_width_get(pmlp_pl
) ? true : false;
348 static int mlxsw_sp_port_open(struct net_device
*dev
)
350 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
353 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
356 netif_start_queue(dev
);
360 static int mlxsw_sp_port_stop(struct net_device
*dev
)
362 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
364 netif_stop_queue(dev
);
365 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
368 static netdev_tx_t
mlxsw_sp_port_xmit(struct sk_buff
*skb
,
369 struct net_device
*dev
)
371 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
372 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
373 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
374 const struct mlxsw_tx_info tx_info
= {
375 .local_port
= mlxsw_sp_port
->local_port
,
381 if (mlxsw_core_skb_transmit_busy(mlxsw_sp
, &tx_info
))
382 return NETDEV_TX_BUSY
;
384 if (unlikely(skb_headroom(skb
) < MLXSW_TXHDR_LEN
)) {
385 struct sk_buff
*skb_orig
= skb
;
387 skb
= skb_realloc_headroom(skb
, MLXSW_TXHDR_LEN
);
389 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
390 dev_kfree_skb_any(skb_orig
);
395 if (eth_skb_pad(skb
)) {
396 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
400 mlxsw_sp_txhdr_construct(skb
, &tx_info
);
402 /* Due to a race we might fail here because of a full queue. In that
403 * unlikely case we simply drop the packet.
405 err
= mlxsw_core_skb_transmit(mlxsw_sp
, skb
, &tx_info
);
408 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
409 u64_stats_update_begin(&pcpu_stats
->syncp
);
410 pcpu_stats
->tx_packets
++;
411 pcpu_stats
->tx_bytes
+= len
;
412 u64_stats_update_end(&pcpu_stats
->syncp
);
414 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
415 dev_kfree_skb_any(skb
);
420 static int mlxsw_sp_port_set_mac_address(struct net_device
*dev
, void *p
)
422 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
423 struct sockaddr
*addr
= p
;
426 if (!is_valid_ether_addr(addr
->sa_data
))
427 return -EADDRNOTAVAIL
;
429 err
= mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
->sa_data
);
432 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
436 static int mlxsw_sp_port_change_mtu(struct net_device
*dev
, int mtu
)
438 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
441 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
448 static struct rtnl_link_stats64
*
449 mlxsw_sp_port_get_stats64(struct net_device
*dev
,
450 struct rtnl_link_stats64
*stats
)
452 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
453 struct mlxsw_sp_port_pcpu_stats
*p
;
454 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
459 for_each_possible_cpu(i
) {
460 p
= per_cpu_ptr(mlxsw_sp_port
->pcpu_stats
, i
);
462 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
463 rx_packets
= p
->rx_packets
;
464 rx_bytes
= p
->rx_bytes
;
465 tx_packets
= p
->tx_packets
;
466 tx_bytes
= p
->tx_bytes
;
467 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
469 stats
->rx_packets
+= rx_packets
;
470 stats
->rx_bytes
+= rx_bytes
;
471 stats
->tx_packets
+= tx_packets
;
472 stats
->tx_bytes
+= tx_bytes
;
473 /* tx_dropped is u32, updated without syncp protection. */
474 tx_dropped
+= p
->tx_dropped
;
476 stats
->tx_dropped
= tx_dropped
;
480 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid_begin
,
481 u16 vid_end
, bool is_member
, bool untagged
)
483 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
487 spvm_pl
= kmalloc(MLXSW_REG_SPVM_LEN
, GFP_KERNEL
);
491 mlxsw_reg_spvm_pack(spvm_pl
, mlxsw_sp_port
->local_port
, vid_begin
,
492 vid_end
, is_member
, untagged
);
493 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvm
), spvm_pl
);
498 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port
*mlxsw_sp_port
)
500 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
501 u16 vid
, last_visited_vid
;
504 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
505 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, true, vid
,
508 last_visited_vid
= vid
;
509 goto err_port_vid_to_fid_set
;
513 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, true);
515 last_visited_vid
= VLAN_N_VID
;
516 goto err_port_vid_to_fid_set
;
521 err_port_vid_to_fid_set
:
522 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, last_visited_vid
)
523 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false, vid
,
528 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port
*mlxsw_sp_port
)
530 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
534 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
538 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
539 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false,
548 int mlxsw_sp_port_add_vid(struct net_device
*dev
, __be16 __always_unused proto
,
551 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
552 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
556 /* VLAN 0 is added to HW filter when device goes up, but it is
557 * reserved in our case, so simply return.
562 if (test_bit(vid
, mlxsw_sp_port
->active_vfids
)) {
563 netdev_warn(dev
, "VID=%d already configured\n", vid
);
567 if (!test_bit(vid
, mlxsw_sp
->active_vfids
)) {
568 err
= mlxsw_sp_vfid_create(mlxsw_sp
, vid
);
570 netdev_err(dev
, "Failed to create vFID=%d\n",
571 MLXSW_SP_VFID_BASE
+ vid
);
575 sftr_pl
= kmalloc(MLXSW_REG_SFTR_LEN
, GFP_KERNEL
);
578 goto err_flood_table_alloc
;
580 mlxsw_reg_sftr_pack(sftr_pl
, 0, vid
,
581 MLXSW_REG_SFGC_TABLE_TYPE_FID
, 0,
582 MLXSW_PORT_CPU_PORT
, true);
583 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sftr
), sftr_pl
);
586 netdev_err(dev
, "Failed to configure flood table\n");
587 goto err_flood_table_config
;
591 /* In case we fail in the following steps, we intentionally do not
592 * destroy the associated vFID.
595 /* When adding the first VLAN interface on a bridged port we need to
596 * transition all the active 802.1Q bridge VLANs to use explicit
597 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
599 if (!mlxsw_sp_port
->nr_vfids
) {
600 err
= mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port
);
602 netdev_err(dev
, "Failed to set to Virtual mode\n");
607 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
,
608 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
,
609 true, MLXSW_SP_VFID_BASE
+ vid
, vid
);
611 netdev_err(dev
, "Failed to map {Port, VID=%d} to vFID=%d\n",
612 vid
, MLXSW_SP_VFID_BASE
+ vid
);
613 goto err_port_vid_to_fid_set
;
616 err
= mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, false);
618 netdev_err(dev
, "Failed to disable learning for VID=%d\n", vid
);
619 goto err_port_vid_learning_set
;
622 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, true, false);
624 netdev_err(dev
, "Failed to set VLAN membership for VID=%d\n",
626 goto err_port_add_vid
;
629 err
= mlxsw_sp_port_stp_state_set(mlxsw_sp_port
, vid
,
630 MLXSW_REG_SPMS_STATE_FORWARDING
);
632 netdev_err(dev
, "Failed to set STP state for VID=%d\n", vid
);
633 goto err_port_stp_state_set
;
636 mlxsw_sp_port
->nr_vfids
++;
637 set_bit(vid
, mlxsw_sp_port
->active_vfids
);
641 err_flood_table_config
:
642 err_flood_table_alloc
:
643 mlxsw_sp_vfid_destroy(mlxsw_sp
, vid
);
646 err_port_stp_state_set
:
647 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
649 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, true);
650 err_port_vid_learning_set
:
651 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
,
652 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
, false,
653 MLXSW_SP_VFID_BASE
+ vid
, vid
);
654 err_port_vid_to_fid_set
:
655 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
659 int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
660 __be16 __always_unused proto
, u16 vid
)
662 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
665 /* VLAN 0 is removed from HW filter when device goes down, but
666 * it is reserved in our case, so simply return.
671 if (!test_bit(vid
, mlxsw_sp_port
->active_vfids
)) {
672 netdev_warn(dev
, "VID=%d does not exist\n", vid
);
676 err
= mlxsw_sp_port_stp_state_set(mlxsw_sp_port
, vid
,
677 MLXSW_REG_SPMS_STATE_DISCARDING
);
679 netdev_err(dev
, "Failed to set STP state for VID=%d\n", vid
);
683 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
685 netdev_err(dev
, "Failed to set VLAN membership for VID=%d\n",
690 err
= mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, true);
692 netdev_err(dev
, "Failed to enable learning for VID=%d\n", vid
);
696 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
,
697 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
,
698 false, MLXSW_SP_VFID_BASE
+ vid
,
701 netdev_err(dev
, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
702 vid
, MLXSW_SP_VFID_BASE
+ vid
);
706 /* When removing the last VLAN interface on a bridged port we need to
707 * transition all active 802.1Q bridge VLANs to use VID to FID
708 * mappings and set port's mode to VLAN mode.
710 if (mlxsw_sp_port
->nr_vfids
== 1) {
711 err
= mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
713 netdev_err(dev
, "Failed to set to VLAN mode\n");
718 mlxsw_sp_port
->nr_vfids
--;
719 clear_bit(vid
, mlxsw_sp_port
->active_vfids
);
724 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
725 .ndo_open
= mlxsw_sp_port_open
,
726 .ndo_stop
= mlxsw_sp_port_stop
,
727 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
728 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
729 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
730 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
731 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
732 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
733 .ndo_fdb_add
= switchdev_port_fdb_add
,
734 .ndo_fdb_del
= switchdev_port_fdb_del
,
735 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
736 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
737 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
738 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
741 static void mlxsw_sp_port_get_drvinfo(struct net_device
*dev
,
742 struct ethtool_drvinfo
*drvinfo
)
744 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
745 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
747 strlcpy(drvinfo
->driver
, mlxsw_sp_driver_name
, sizeof(drvinfo
->driver
));
748 strlcpy(drvinfo
->version
, mlxsw_sp_driver_version
,
749 sizeof(drvinfo
->version
));
750 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
752 mlxsw_sp
->bus_info
->fw_rev
.major
,
753 mlxsw_sp
->bus_info
->fw_rev
.minor
,
754 mlxsw_sp
->bus_info
->fw_rev
.subminor
);
755 strlcpy(drvinfo
->bus_info
, mlxsw_sp
->bus_info
->device_name
,
756 sizeof(drvinfo
->bus_info
));
759 struct mlxsw_sp_port_hw_stats
{
760 char str
[ETH_GSTRING_LEN
];
761 u64 (*getter
)(char *payload
);
764 static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats
[] = {
766 .str
= "a_frames_transmitted_ok",
767 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
770 .str
= "a_frames_received_ok",
771 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
774 .str
= "a_frame_check_sequence_errors",
775 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
778 .str
= "a_alignment_errors",
779 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
782 .str
= "a_octets_transmitted_ok",
783 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
786 .str
= "a_octets_received_ok",
787 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
790 .str
= "a_multicast_frames_xmitted_ok",
791 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
794 .str
= "a_broadcast_frames_xmitted_ok",
795 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
798 .str
= "a_multicast_frames_received_ok",
799 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
802 .str
= "a_broadcast_frames_received_ok",
803 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
806 .str
= "a_in_range_length_errors",
807 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
810 .str
= "a_out_of_range_length_field",
811 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
814 .str
= "a_frame_too_long_errors",
815 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
818 .str
= "a_symbol_error_during_carrier",
819 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
822 .str
= "a_mac_control_frames_transmitted",
823 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
826 .str
= "a_mac_control_frames_received",
827 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
830 .str
= "a_unsupported_opcodes_received",
831 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
834 .str
= "a_pause_mac_ctrl_frames_received",
835 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
838 .str
= "a_pause_mac_ctrl_frames_xmitted",
839 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
843 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
845 static void mlxsw_sp_port_get_strings(struct net_device
*dev
,
846 u32 stringset
, u8
*data
)
853 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
854 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
856 p
+= ETH_GSTRING_LEN
;
862 static void mlxsw_sp_port_get_stats(struct net_device
*dev
,
863 struct ethtool_stats
*stats
, u64
*data
)
865 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
866 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
867 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
871 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sp_port
->local_port
);
872 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
873 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++)
874 data
[i
] = !err
? mlxsw_sp_port_hw_stats
[i
].getter(ppcnt_pl
) : 0;
877 static int mlxsw_sp_port_get_sset_count(struct net_device
*dev
, int sset
)
881 return MLXSW_SP_PORT_HW_STATS_LEN
;
887 struct mlxsw_sp_port_link_mode
{
894 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode
[] = {
896 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
897 .supported
= SUPPORTED_100baseT_Full
,
898 .advertised
= ADVERTISED_100baseT_Full
,
902 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX
,
906 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
907 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
908 .supported
= SUPPORTED_1000baseKX_Full
,
909 .advertised
= ADVERTISED_1000baseKX_Full
,
913 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
914 .supported
= SUPPORTED_10000baseT_Full
,
915 .advertised
= ADVERTISED_10000baseT_Full
,
919 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
920 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
921 .supported
= SUPPORTED_10000baseKX4_Full
,
922 .advertised
= ADVERTISED_10000baseKX4_Full
,
926 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
927 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
928 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
929 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
930 .supported
= SUPPORTED_10000baseKR_Full
,
931 .advertised
= ADVERTISED_10000baseKR_Full
,
935 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
936 .supported
= SUPPORTED_20000baseKR2_Full
,
937 .advertised
= ADVERTISED_20000baseKR2_Full
,
941 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
942 .supported
= SUPPORTED_40000baseCR4_Full
,
943 .advertised
= ADVERTISED_40000baseCR4_Full
,
947 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
948 .supported
= SUPPORTED_40000baseKR4_Full
,
949 .advertised
= ADVERTISED_40000baseKR4_Full
,
953 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
954 .supported
= SUPPORTED_40000baseSR4_Full
,
955 .advertised
= ADVERTISED_40000baseSR4_Full
,
959 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
960 .supported
= SUPPORTED_40000baseLR4_Full
,
961 .advertised
= ADVERTISED_40000baseLR4_Full
,
965 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
|
966 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
|
967 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
971 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4
|
972 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
|
973 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
977 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
978 .supported
= SUPPORTED_56000baseKR4_Full
,
979 .advertised
= ADVERTISED_56000baseKR4_Full
,
983 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
|
984 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
985 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
986 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
991 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
993 static u32
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto
)
995 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
996 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
997 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
998 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
999 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
1000 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
1001 return SUPPORTED_FIBRE
;
1003 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1004 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
1005 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
1006 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
1007 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
1008 return SUPPORTED_Backplane
;
1012 static u32
mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto
)
1017 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1018 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
1019 modes
|= mlxsw_sp_port_link_mode
[i
].supported
;
1024 static u32
mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto
)
1029 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1030 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
1031 modes
|= mlxsw_sp_port_link_mode
[i
].advertised
;
1036 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok
, u32 ptys_eth_proto
,
1037 struct ethtool_cmd
*cmd
)
1039 u32 speed
= SPEED_UNKNOWN
;
1040 u8 duplex
= DUPLEX_UNKNOWN
;
1046 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1047 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
) {
1048 speed
= mlxsw_sp_port_link_mode
[i
].speed
;
1049 duplex
= DUPLEX_FULL
;
1054 ethtool_cmd_speed_set(cmd
, speed
);
1055 cmd
->duplex
= duplex
;
1058 static u8
mlxsw_sp_port_connector_port(u32 ptys_eth_proto
)
1060 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1061 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
1062 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
1063 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
1066 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1067 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
1068 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
))
1071 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1072 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
1073 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
1074 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
))
1080 static int mlxsw_sp_port_get_settings(struct net_device
*dev
,
1081 struct ethtool_cmd
*cmd
)
1083 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1084 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1085 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1087 u32 eth_proto_admin
;
1091 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
1092 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1094 netdev_err(dev
, "Failed to get proto");
1097 mlxsw_reg_ptys_unpack(ptys_pl
, ð_proto_cap
,
1098 ð_proto_admin
, ð_proto_oper
);
1100 cmd
->supported
= mlxsw_sp_from_ptys_supported_port(eth_proto_cap
) |
1101 mlxsw_sp_from_ptys_supported_link(eth_proto_cap
) |
1102 SUPPORTED_Pause
| SUPPORTED_Asym_Pause
;
1103 cmd
->advertising
= mlxsw_sp_from_ptys_advert_link(eth_proto_admin
);
1104 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev
),
1105 eth_proto_oper
, cmd
);
1107 eth_proto_oper
= eth_proto_oper
? eth_proto_oper
: eth_proto_cap
;
1108 cmd
->port
= mlxsw_sp_port_connector_port(eth_proto_oper
);
1109 cmd
->lp_advertising
= mlxsw_sp_from_ptys_advert_link(eth_proto_oper
);
1111 cmd
->transceiver
= XCVR_INTERNAL
;
1115 static u32
mlxsw_sp_to_ptys_advert_link(u32 advertising
)
1120 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1121 if (advertising
& mlxsw_sp_port_link_mode
[i
].advertised
)
1122 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1127 static u32
mlxsw_sp_to_ptys_speed(u32 speed
)
1132 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1133 if (speed
== mlxsw_sp_port_link_mode
[i
].speed
)
1134 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1139 static int mlxsw_sp_port_set_settings(struct net_device
*dev
,
1140 struct ethtool_cmd
*cmd
)
1142 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1143 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1144 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1148 u32 eth_proto_admin
;
1152 speed
= ethtool_cmd_speed(cmd
);
1154 eth_proto_new
= cmd
->autoneg
== AUTONEG_ENABLE
?
1155 mlxsw_sp_to_ptys_advert_link(cmd
->advertising
) :
1156 mlxsw_sp_to_ptys_speed(speed
);
1158 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
1159 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1161 netdev_err(dev
, "Failed to get proto");
1164 mlxsw_reg_ptys_unpack(ptys_pl
, ð_proto_cap
, ð_proto_admin
, NULL
);
1166 eth_proto_new
= eth_proto_new
& eth_proto_cap
;
1167 if (!eth_proto_new
) {
1168 netdev_err(dev
, "Not supported proto admin requested");
1171 if (eth_proto_new
== eth_proto_admin
)
1174 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sp_port
->local_port
, eth_proto_new
);
1175 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1177 netdev_err(dev
, "Failed to set proto admin");
1181 err
= mlxsw_sp_port_oper_status_get(mlxsw_sp_port
, &is_up
);
1183 netdev_err(dev
, "Failed to get oper status");
1189 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1191 netdev_err(dev
, "Failed to set admin status");
1195 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
1197 netdev_err(dev
, "Failed to set admin status");
1204 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops
= {
1205 .get_drvinfo
= mlxsw_sp_port_get_drvinfo
,
1206 .get_link
= ethtool_op_get_link
,
1207 .get_strings
= mlxsw_sp_port_get_strings
,
1208 .get_ethtool_stats
= mlxsw_sp_port_get_stats
,
1209 .get_sset_count
= mlxsw_sp_port_get_sset_count
,
1210 .get_settings
= mlxsw_sp_port_get_settings
,
1211 .set_settings
= mlxsw_sp_port_set_settings
,
1214 static int mlxsw_sp_port_create(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
1216 struct mlxsw_sp_port
*mlxsw_sp_port
;
1217 struct net_device
*dev
;
1221 dev
= alloc_etherdev(sizeof(struct mlxsw_sp_port
));
1224 mlxsw_sp_port
= netdev_priv(dev
);
1225 mlxsw_sp_port
->dev
= dev
;
1226 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
1227 mlxsw_sp_port
->local_port
= local_port
;
1228 mlxsw_sp_port
->learning
= 1;
1229 mlxsw_sp_port
->learning_sync
= 1;
1230 mlxsw_sp_port
->uc_flood
= 1;
1231 mlxsw_sp_port
->pvid
= 1;
1233 mlxsw_sp_port
->pcpu_stats
=
1234 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats
);
1235 if (!mlxsw_sp_port
->pcpu_stats
) {
1237 goto err_alloc_stats
;
1240 dev
->netdev_ops
= &mlxsw_sp_port_netdev_ops
;
1241 dev
->ethtool_ops
= &mlxsw_sp_port_ethtool_ops
;
1243 err
= mlxsw_sp_port_dev_addr_init(mlxsw_sp_port
);
1245 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unable to init port mac address\n",
1246 mlxsw_sp_port
->local_port
);
1247 goto err_dev_addr_init
;
1250 netif_carrier_off(dev
);
1252 dev
->features
|= NETIF_F_NETNS_LOCAL
| NETIF_F_LLTX
| NETIF_F_SG
|
1253 NETIF_F_HW_VLAN_CTAG_FILTER
;
1255 /* Each packet needs to have a Tx header (metadata) on top all other
1258 dev
->hard_header_len
+= MLXSW_TXHDR_LEN
;
1260 err
= mlxsw_sp_port_module_check(mlxsw_sp_port
, &usable
);
1262 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to check module\n",
1263 mlxsw_sp_port
->local_port
);
1264 goto err_port_module_check
;
1268 dev_dbg(mlxsw_sp
->bus_info
->dev
, "Port %d: Not usable, skipping initialization\n",
1269 mlxsw_sp_port
->local_port
);
1270 goto port_not_usable
;
1273 err
= mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port
);
1275 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set system port mapping\n",
1276 mlxsw_sp_port
->local_port
);
1277 goto err_port_system_port_mapping_set
;
1280 err
= mlxsw_sp_port_swid_set(mlxsw_sp_port
, 0);
1282 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set SWID\n",
1283 mlxsw_sp_port
->local_port
);
1284 goto err_port_swid_set
;
1287 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, ETH_DATA_LEN
);
1289 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set MTU\n",
1290 mlxsw_sp_port
->local_port
);
1291 goto err_port_mtu_set
;
1294 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1296 goto err_port_admin_status_set
;
1298 err
= mlxsw_sp_port_buffers_init(mlxsw_sp_port
);
1300 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize buffers\n",
1301 mlxsw_sp_port
->local_port
);
1302 goto err_port_buffers_init
;
1305 mlxsw_sp_port_switchdev_init(mlxsw_sp_port
);
1306 err
= register_netdev(dev
);
1308 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to register netdev\n",
1309 mlxsw_sp_port
->local_port
);
1310 goto err_register_netdev
;
1313 err
= mlxsw_sp_port_vlan_init(mlxsw_sp_port
);
1315 goto err_port_vlan_init
;
1317 mlxsw_sp
->ports
[local_port
] = mlxsw_sp_port
;
1321 unregister_netdev(dev
);
1322 err_register_netdev
:
1323 err_port_buffers_init
:
1324 err_port_admin_status_set
:
1327 err_port_system_port_mapping_set
:
1329 err_port_module_check
:
1331 free_percpu(mlxsw_sp_port
->pcpu_stats
);
1337 static void mlxsw_sp_vfids_fini(struct mlxsw_sp
*mlxsw_sp
)
1341 for_each_set_bit(vfid
, mlxsw_sp
->active_vfids
, VLAN_N_VID
)
1342 mlxsw_sp_vfid_destroy(mlxsw_sp
, vfid
);
1345 static void mlxsw_sp_port_remove(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
1347 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
1351 mlxsw_sp_port_kill_vid(mlxsw_sp_port
->dev
, 0, 1);
1352 unregister_netdev(mlxsw_sp_port
->dev
); /* This calls ndo_stop */
1353 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port
);
1354 free_percpu(mlxsw_sp_port
->pcpu_stats
);
1355 free_netdev(mlxsw_sp_port
->dev
);
1358 static void mlxsw_sp_ports_remove(struct mlxsw_sp
*mlxsw_sp
)
1362 for (i
= 1; i
< MLXSW_PORT_MAX_PORTS
; i
++)
1363 mlxsw_sp_port_remove(mlxsw_sp
, i
);
1364 kfree(mlxsw_sp
->ports
);
1367 static int mlxsw_sp_ports_create(struct mlxsw_sp
*mlxsw_sp
)
1373 alloc_size
= sizeof(struct mlxsw_sp_port
*) * MLXSW_PORT_MAX_PORTS
;
1374 mlxsw_sp
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
1375 if (!mlxsw_sp
->ports
)
1378 for (i
= 1; i
< MLXSW_PORT_MAX_PORTS
; i
++) {
1379 err
= mlxsw_sp_port_create(mlxsw_sp
, i
);
1381 goto err_port_create
;
1386 for (i
--; i
>= 1; i
--)
1387 mlxsw_sp_port_remove(mlxsw_sp
, i
);
1388 kfree(mlxsw_sp
->ports
);
1392 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info
*reg
,
1393 char *pude_pl
, void *priv
)
1395 struct mlxsw_sp
*mlxsw_sp
= priv
;
1396 struct mlxsw_sp_port
*mlxsw_sp_port
;
1397 enum mlxsw_reg_pude_oper_status status
;
1400 local_port
= mlxsw_reg_pude_local_port_get(pude_pl
);
1401 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
1402 if (!mlxsw_sp_port
) {
1403 dev_warn(mlxsw_sp
->bus_info
->dev
, "Port %d: Link event received for non-existent port\n",
1408 status
= mlxsw_reg_pude_oper_status_get(pude_pl
);
1409 if (status
== MLXSW_PORT_OPER_STATUS_UP
) {
1410 netdev_info(mlxsw_sp_port
->dev
, "link up\n");
1411 netif_carrier_on(mlxsw_sp_port
->dev
);
1413 netdev_info(mlxsw_sp_port
->dev
, "link down\n");
1414 netif_carrier_off(mlxsw_sp_port
->dev
);
1418 static struct mlxsw_event_listener mlxsw_sp_pude_event
= {
1419 .func
= mlxsw_sp_pude_event_func
,
1420 .trap_id
= MLXSW_TRAP_ID_PUDE
,
1423 static int mlxsw_sp_event_register(struct mlxsw_sp
*mlxsw_sp
,
1424 enum mlxsw_event_trap_id trap_id
)
1426 struct mlxsw_event_listener
*el
;
1427 char hpkt_pl
[MLXSW_REG_HPKT_LEN
];
1431 case MLXSW_TRAP_ID_PUDE
:
1432 el
= &mlxsw_sp_pude_event
;
1435 err
= mlxsw_core_event_listener_register(mlxsw_sp
->core
, el
, mlxsw_sp
);
1439 mlxsw_reg_hpkt_pack(hpkt_pl
, MLXSW_REG_HPKT_ACTION_FORWARD
, trap_id
);
1440 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(hpkt
), hpkt_pl
);
1442 goto err_event_trap_set
;
1447 mlxsw_core_event_listener_unregister(mlxsw_sp
->core
, el
, mlxsw_sp
);
1451 static void mlxsw_sp_event_unregister(struct mlxsw_sp
*mlxsw_sp
,
1452 enum mlxsw_event_trap_id trap_id
)
1454 struct mlxsw_event_listener
*el
;
1457 case MLXSW_TRAP_ID_PUDE
:
1458 el
= &mlxsw_sp_pude_event
;
1461 mlxsw_core_event_listener_unregister(mlxsw_sp
->core
, el
, mlxsw_sp
);
1464 static void mlxsw_sp_rx_listener_func(struct sk_buff
*skb
, u8 local_port
,
1467 struct mlxsw_sp
*mlxsw_sp
= priv
;
1468 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
1469 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
1471 if (unlikely(!mlxsw_sp_port
)) {
1472 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: skb received for non-existent port\n",
1477 skb
->dev
= mlxsw_sp_port
->dev
;
1479 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
1480 u64_stats_update_begin(&pcpu_stats
->syncp
);
1481 pcpu_stats
->rx_packets
++;
1482 pcpu_stats
->rx_bytes
+= skb
->len
;
1483 u64_stats_update_end(&pcpu_stats
->syncp
);
1485 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
1486 netif_receive_skb(skb
);
1489 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener
[] = {
1491 .func
= mlxsw_sp_rx_listener_func
,
1492 .local_port
= MLXSW_PORT_DONT_CARE
,
1493 .trap_id
= MLXSW_TRAP_ID_FDB_MC
,
1495 /* Traps for specific L2 packet types, not trapped as FDB MC */
1497 .func
= mlxsw_sp_rx_listener_func
,
1498 .local_port
= MLXSW_PORT_DONT_CARE
,
1499 .trap_id
= MLXSW_TRAP_ID_STP
,
1502 .func
= mlxsw_sp_rx_listener_func
,
1503 .local_port
= MLXSW_PORT_DONT_CARE
,
1504 .trap_id
= MLXSW_TRAP_ID_LACP
,
1507 .func
= mlxsw_sp_rx_listener_func
,
1508 .local_port
= MLXSW_PORT_DONT_CARE
,
1509 .trap_id
= MLXSW_TRAP_ID_EAPOL
,
1512 .func
= mlxsw_sp_rx_listener_func
,
1513 .local_port
= MLXSW_PORT_DONT_CARE
,
1514 .trap_id
= MLXSW_TRAP_ID_LLDP
,
1517 .func
= mlxsw_sp_rx_listener_func
,
1518 .local_port
= MLXSW_PORT_DONT_CARE
,
1519 .trap_id
= MLXSW_TRAP_ID_MMRP
,
1522 .func
= mlxsw_sp_rx_listener_func
,
1523 .local_port
= MLXSW_PORT_DONT_CARE
,
1524 .trap_id
= MLXSW_TRAP_ID_MVRP
,
1527 .func
= mlxsw_sp_rx_listener_func
,
1528 .local_port
= MLXSW_PORT_DONT_CARE
,
1529 .trap_id
= MLXSW_TRAP_ID_RPVST
,
1532 .func
= mlxsw_sp_rx_listener_func
,
1533 .local_port
= MLXSW_PORT_DONT_CARE
,
1534 .trap_id
= MLXSW_TRAP_ID_DHCP
,
1537 .func
= mlxsw_sp_rx_listener_func
,
1538 .local_port
= MLXSW_PORT_DONT_CARE
,
1539 .trap_id
= MLXSW_TRAP_ID_IGMP_QUERY
,
1542 .func
= mlxsw_sp_rx_listener_func
,
1543 .local_port
= MLXSW_PORT_DONT_CARE
,
1544 .trap_id
= MLXSW_TRAP_ID_IGMP_V1_REPORT
,
1547 .func
= mlxsw_sp_rx_listener_func
,
1548 .local_port
= MLXSW_PORT_DONT_CARE
,
1549 .trap_id
= MLXSW_TRAP_ID_IGMP_V2_REPORT
,
1552 .func
= mlxsw_sp_rx_listener_func
,
1553 .local_port
= MLXSW_PORT_DONT_CARE
,
1554 .trap_id
= MLXSW_TRAP_ID_IGMP_V2_LEAVE
,
1557 .func
= mlxsw_sp_rx_listener_func
,
1558 .local_port
= MLXSW_PORT_DONT_CARE
,
1559 .trap_id
= MLXSW_TRAP_ID_IGMP_V3_REPORT
,
1563 static int mlxsw_sp_traps_init(struct mlxsw_sp
*mlxsw_sp
)
1565 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
1566 char hpkt_pl
[MLXSW_REG_HPKT_LEN
];
1570 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_RX
);
1571 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(htgt
), htgt_pl
);
1575 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_CTRL
);
1576 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(htgt
), htgt_pl
);
1580 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_rx_listener
); i
++) {
1581 err
= mlxsw_core_rx_listener_register(mlxsw_sp
->core
,
1582 &mlxsw_sp_rx_listener
[i
],
1585 goto err_rx_listener_register
;
1587 mlxsw_reg_hpkt_pack(hpkt_pl
, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU
,
1588 mlxsw_sp_rx_listener
[i
].trap_id
);
1589 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(hpkt
), hpkt_pl
);
1591 goto err_rx_trap_set
;
1596 mlxsw_core_rx_listener_unregister(mlxsw_sp
->core
,
1597 &mlxsw_sp_rx_listener
[i
],
1599 err_rx_listener_register
:
1600 for (i
--; i
>= 0; i
--) {
1601 mlxsw_reg_hpkt_pack(hpkt_pl
, MLXSW_REG_HPKT_ACTION_FORWARD
,
1602 mlxsw_sp_rx_listener
[i
].trap_id
);
1603 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(hpkt
), hpkt_pl
);
1605 mlxsw_core_rx_listener_unregister(mlxsw_sp
->core
,
1606 &mlxsw_sp_rx_listener
[i
],
1612 static void mlxsw_sp_traps_fini(struct mlxsw_sp
*mlxsw_sp
)
1614 char hpkt_pl
[MLXSW_REG_HPKT_LEN
];
1617 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_rx_listener
); i
++) {
1618 mlxsw_reg_hpkt_pack(hpkt_pl
, MLXSW_REG_HPKT_ACTION_FORWARD
,
1619 mlxsw_sp_rx_listener
[i
].trap_id
);
1620 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(hpkt
), hpkt_pl
);
1622 mlxsw_core_rx_listener_unregister(mlxsw_sp
->core
,
1623 &mlxsw_sp_rx_listener
[i
],
1628 static int __mlxsw_sp_flood_init(struct mlxsw_core
*mlxsw_core
,
1629 enum mlxsw_reg_sfgc_type type
,
1630 enum mlxsw_reg_sfgc_bridge_type bridge_type
)
1632 enum mlxsw_flood_table_type table_type
;
1633 enum mlxsw_sp_flood_table flood_table
;
1634 char sfgc_pl
[MLXSW_REG_SFGC_LEN
];
1636 if (bridge_type
== MLXSW_REG_SFGC_BRIDGE_TYPE_VFID
) {
1637 table_type
= MLXSW_REG_SFGC_TABLE_TYPE_FID
;
1640 table_type
= MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST
;
1641 if (type
== MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST
)
1642 flood_table
= MLXSW_SP_FLOOD_TABLE_UC
;
1644 flood_table
= MLXSW_SP_FLOOD_TABLE_BM
;
1647 mlxsw_reg_sfgc_pack(sfgc_pl
, type
, bridge_type
, table_type
,
1649 return mlxsw_reg_write(mlxsw_core
, MLXSW_REG(sfgc
), sfgc_pl
);
1652 static int mlxsw_sp_flood_init(struct mlxsw_sp
*mlxsw_sp
)
1656 /* For non-offloaded netdevs, flood all traffic types to CPU
1659 for (type
= 0; type
< MLXSW_REG_SFGC_TYPE_MAX
; type
++) {
1660 if (type
== MLXSW_REG_SFGC_TYPE_RESERVED
)
1663 err
= __mlxsw_sp_flood_init(mlxsw_sp
->core
, type
,
1664 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID
);
1669 /* For bridged ports, use one flooding table for unknown unicast
1670 * traffic and a second table for unregistered multicast and
1673 for (type
= 0; type
< MLXSW_REG_SFGC_TYPE_MAX
; type
++) {
1674 if (type
== MLXSW_REG_SFGC_TYPE_RESERVED
)
1677 err
= __mlxsw_sp_flood_init(mlxsw_sp
->core
, type
,
1678 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID
);
1686 static int mlxsw_sp_init(void *priv
, struct mlxsw_core
*mlxsw_core
,
1687 const struct mlxsw_bus_info
*mlxsw_bus_info
)
1689 struct mlxsw_sp
*mlxsw_sp
= priv
;
1692 mlxsw_sp
->core
= mlxsw_core
;
1693 mlxsw_sp
->bus_info
= mlxsw_bus_info
;
1695 err
= mlxsw_sp_base_mac_get(mlxsw_sp
);
1697 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to get base mac\n");
1701 err
= mlxsw_sp_ports_create(mlxsw_sp
);
1703 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create ports\n");
1704 goto err_ports_create
;
1707 err
= mlxsw_sp_event_register(mlxsw_sp
, MLXSW_TRAP_ID_PUDE
);
1709 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to register for PUDE events\n");
1710 goto err_event_register
;
1713 err
= mlxsw_sp_traps_init(mlxsw_sp
);
1715 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set traps for RX\n");
1716 goto err_rx_listener_register
;
1719 err
= mlxsw_sp_flood_init(mlxsw_sp
);
1721 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize flood tables\n");
1722 goto err_flood_init
;
1725 err
= mlxsw_sp_buffers_init(mlxsw_sp
);
1727 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize buffers\n");
1728 goto err_buffers_init
;
1731 err
= mlxsw_sp_switchdev_init(mlxsw_sp
);
1733 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize switchdev\n");
1734 goto err_switchdev_init
;
1742 mlxsw_sp_traps_fini(mlxsw_sp
);
1743 err_rx_listener_register
:
1744 mlxsw_sp_event_unregister(mlxsw_sp
, MLXSW_TRAP_ID_PUDE
);
1746 mlxsw_sp_ports_remove(mlxsw_sp
);
1748 mlxsw_sp_vfids_fini(mlxsw_sp
);
1752 static void mlxsw_sp_fini(void *priv
)
1754 struct mlxsw_sp
*mlxsw_sp
= priv
;
1756 mlxsw_sp_switchdev_fini(mlxsw_sp
);
1757 mlxsw_sp_traps_fini(mlxsw_sp
);
1758 mlxsw_sp_event_unregister(mlxsw_sp
, MLXSW_TRAP_ID_PUDE
);
1759 mlxsw_sp_ports_remove(mlxsw_sp
);
1760 mlxsw_sp_vfids_fini(mlxsw_sp
);
1763 static struct mlxsw_config_profile mlxsw_sp_config_profile
= {
1764 .used_max_vepa_channels
= 1,
1765 .max_vepa_channels
= 0,
1768 .used_max_port_per_lag
= 1,
1769 .max_port_per_lag
= 16,
1774 .used_max_system_port
= 1,
1775 .max_system_port
= 64,
1776 .used_max_vlan_groups
= 1,
1777 .max_vlan_groups
= 127,
1778 .used_max_regions
= 1,
1780 .used_flood_tables
= 1,
1781 .used_flood_mode
= 1,
1783 .max_fid_offset_flood_tables
= 2,
1784 .fid_offset_flood_table_size
= VLAN_N_VID
- 1,
1785 .max_fid_flood_tables
= 1,
1786 .fid_flood_table_size
= VLAN_N_VID
,
1787 .used_max_ib_mc
= 1,
1794 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
1799 static struct mlxsw_driver mlxsw_sp_driver
= {
1800 .kind
= MLXSW_DEVICE_KIND_SPECTRUM
,
1801 .owner
= THIS_MODULE
,
1802 .priv_size
= sizeof(struct mlxsw_sp
),
1803 .init
= mlxsw_sp_init
,
1804 .fini
= mlxsw_sp_fini
,
1805 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
1806 .txhdr_len
= MLXSW_TXHDR_LEN
,
1807 .profile
= &mlxsw_sp_config_profile
,
1810 static bool mlxsw_sp_port_dev_check(const struct net_device
*dev
)
1812 return dev
->netdev_ops
== &mlxsw_sp_port_netdev_ops
;
1815 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port
*mlxsw_sp_port
)
1817 struct net_device
*dev
= mlxsw_sp_port
->dev
;
1820 /* When port is not bridged untagged packets are tagged with
1821 * PVID=VID=1, thereby creating an implicit VLAN interface in
1822 * the device. Remove it and let bridge code take care of its
1825 err
= mlxsw_sp_port_kill_vid(dev
, 0, 1);
1827 netdev_err(dev
, "Failed to remove VID 1\n");
1832 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
1834 struct net_device
*dev
= mlxsw_sp_port
->dev
;
1837 /* Add implicit VLAN interface in the device, so that untagged
1838 * packets will be classified to the default vFID.
1840 err
= mlxsw_sp_port_add_vid(dev
, 0, 1);
1842 netdev_err(dev
, "Failed to add VID 1\n");
1847 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp
*mlxsw_sp
,
1848 struct net_device
*br_dev
)
1850 return !mlxsw_sp
->master_bridge
.dev
||
1851 mlxsw_sp
->master_bridge
.dev
== br_dev
;
1854 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp
*mlxsw_sp
,
1855 struct net_device
*br_dev
)
1857 mlxsw_sp
->master_bridge
.dev
= br_dev
;
1858 mlxsw_sp
->master_bridge
.ref_count
++;
1861 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp
*mlxsw_sp
,
1862 struct net_device
*br_dev
)
1864 if (--mlxsw_sp
->master_bridge
.ref_count
== 0)
1865 mlxsw_sp
->master_bridge
.dev
= NULL
;
1868 static int mlxsw_sp_netdevice_event(struct notifier_block
*unused
,
1869 unsigned long event
, void *ptr
)
1871 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
1872 struct netdev_notifier_changeupper_info
*info
;
1873 struct mlxsw_sp_port
*mlxsw_sp_port
;
1874 struct net_device
*upper_dev
;
1875 struct mlxsw_sp
*mlxsw_sp
;
1878 if (!mlxsw_sp_port_dev_check(dev
))
1881 mlxsw_sp_port
= netdev_priv(dev
);
1882 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1886 case NETDEV_PRECHANGEUPPER
:
1887 upper_dev
= info
->upper_dev
;
1888 /* HW limitation forbids to put ports to multiple bridges. */
1889 if (info
->master
&& info
->linking
&&
1890 netif_is_bridge_master(upper_dev
) &&
1891 !mlxsw_sp_master_bridge_check(mlxsw_sp
, upper_dev
))
1894 case NETDEV_CHANGEUPPER
:
1895 upper_dev
= info
->upper_dev
;
1897 netif_is_bridge_master(upper_dev
)) {
1898 if (info
->linking
) {
1899 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
);
1901 netdev_err(dev
, "Failed to join bridge\n");
1902 mlxsw_sp_master_bridge_inc(mlxsw_sp
, upper_dev
);
1903 mlxsw_sp_port
->bridged
= 1;
1905 err
= mlxsw_sp_port_bridge_leave(mlxsw_sp_port
);
1907 netdev_err(dev
, "Failed to leave bridge\n");
1908 mlxsw_sp_port
->bridged
= 0;
1909 mlxsw_sp_master_bridge_dec(mlxsw_sp
, upper_dev
);
1918 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly
= {
1919 .notifier_call
= mlxsw_sp_netdevice_event
,
1922 static int __init
mlxsw_sp_module_init(void)
1926 register_netdevice_notifier(&mlxsw_sp_netdevice_nb
);
1927 err
= mlxsw_core_driver_register(&mlxsw_sp_driver
);
1929 goto err_core_driver_register
;
1932 err_core_driver_register
:
1933 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb
);
1937 static void __exit
mlxsw_sp_module_exit(void)
1939 mlxsw_core_driver_unregister(&mlxsw_sp_driver
);
1940 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb
);
1943 module_init(mlxsw_sp_module_init
);
1944 module_exit(mlxsw_sp_module_exit
);
1946 MODULE_LICENSE("Dual BSD/GPL");
1947 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1948 MODULE_DESCRIPTION("Mellanox Spectrum driver");
1949 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM
);