1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <net/switchdev.h>
27 #include <net/pkt_cls.h>
28 #include <net/netevent.h>
29 #include <net/addrconf.h>
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "spectrum_ptp.h"
44 #include "spectrum_trap.h"
46 #define MLXSW_SP1_FWREV_MAJOR 13
47 #define MLXSW_SP1_FWREV_MINOR 2008
48 #define MLXSW_SP1_FWREV_SUBMINOR 2018
49 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
51 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev
= {
52 .major
= MLXSW_SP1_FWREV_MAJOR
,
53 .minor
= MLXSW_SP1_FWREV_MINOR
,
54 .subminor
= MLXSW_SP1_FWREV_SUBMINOR
,
55 .can_reset_minor
= MLXSW_SP1_FWREV_CAN_RESET_MINOR
,
58 #define MLXSW_SP1_FW_FILENAME \
59 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
60 "." __stringify(MLXSW_SP1_FWREV_MINOR) \
61 "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
63 #define MLXSW_SP2_FWREV_MAJOR 29
64 #define MLXSW_SP2_FWREV_MINOR 2008
65 #define MLXSW_SP2_FWREV_SUBMINOR 2018
67 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev
= {
68 .major
= MLXSW_SP2_FWREV_MAJOR
,
69 .minor
= MLXSW_SP2_FWREV_MINOR
,
70 .subminor
= MLXSW_SP2_FWREV_SUBMINOR
,
73 #define MLXSW_SP2_FW_FILENAME \
74 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
75 "." __stringify(MLXSW_SP2_FWREV_MINOR) \
76 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
78 #define MLXSW_SP3_FWREV_MAJOR 30
79 #define MLXSW_SP3_FWREV_MINOR 2008
80 #define MLXSW_SP3_FWREV_SUBMINOR 2018
82 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev
= {
83 .major
= MLXSW_SP3_FWREV_MAJOR
,
84 .minor
= MLXSW_SP3_FWREV_MINOR
,
85 .subminor
= MLXSW_SP3_FWREV_SUBMINOR
,
88 #define MLXSW_SP3_FW_FILENAME \
89 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
90 "." __stringify(MLXSW_SP3_FWREV_MINOR) \
91 "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
/* Driver names, one per supported ASIC generation. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
97 static const unsigned char mlxsw_sp1_mac_mask
[ETH_ALEN
] = {
98 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
100 static const unsigned char mlxsw_sp2_mac_mask
[ETH_ALEN
] = {
101 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
108 MLXSW_ITEM32(tx
, hdr
, version
, 0x00, 28, 4);
111 * Packet control type.
112 * 0 - Ethernet control (e.g. EMADs, LACP)
115 MLXSW_ITEM32(tx
, hdr
, ctl
, 0x00, 26, 2);
118 * Packet protocol type. Must be set to 1 (Ethernet).
120 MLXSW_ITEM32(tx
, hdr
, proto
, 0x00, 21, 3);
122 /* tx_hdr_rx_is_router
123 * Packet is sent from the router. Valid for data packets only.
125 MLXSW_ITEM32(tx
, hdr
, rx_is_router
, 0x00, 19, 1);
128 * Indicates if the 'fid' field is valid and should be used for
129 * forwarding lookup. Valid for data packets only.
131 MLXSW_ITEM32(tx
, hdr
, fid_valid
, 0x00, 16, 1);
134 * Switch partition ID. Must be set to 0.
136 MLXSW_ITEM32(tx
, hdr
, swid
, 0x00, 12, 3);
138 /* tx_hdr_control_tclass
139 * Indicates if the packet should use the control TClass and not one
140 * of the data TClasses.
142 MLXSW_ITEM32(tx
, hdr
, control_tclass
, 0x00, 6, 1);
145 * Egress TClass to be used on the egress device on the egress port.
147 MLXSW_ITEM32(tx
, hdr
, etclass
, 0x00, 0, 4);
150 * Destination local port for unicast packets.
151 * Destination multicast ID for multicast packets.
153 * Control packets are directed to a specific egress port, while data
154 * packets are transmitted through the CPU port (0) into the switch partition,
155 * where forwarding rules are applied.
157 MLXSW_ITEM32(tx
, hdr
, port_mid
, 0x04, 16, 16);
160 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
161 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
162 * Valid for data packets only.
164 MLXSW_ITEM32(tx
, hdr
, fid
, 0x08, 0, 16);
168 * 6 - Control packets
170 MLXSW_ITEM32(tx
, hdr
, type
, 0x0C, 0, 4);
172 int mlxsw_sp_flow_counter_get(struct mlxsw_sp
*mlxsw_sp
,
173 unsigned int counter_index
, u64
*packets
,
176 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
179 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_NOP
,
180 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES
);
181 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
185 *packets
= mlxsw_reg_mgpc_packet_counter_get(mgpc_pl
);
187 *bytes
= mlxsw_reg_mgpc_byte_counter_get(mgpc_pl
);
191 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp
*mlxsw_sp
,
192 unsigned int counter_index
)
194 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
196 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_CLEAR
,
197 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES
);
198 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
201 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp
*mlxsw_sp
,
202 unsigned int *p_counter_index
)
206 err
= mlxsw_sp_counter_alloc(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
210 err
= mlxsw_sp_flow_counter_clear(mlxsw_sp
, *p_counter_index
);
212 goto err_counter_clear
;
216 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
221 void mlxsw_sp_flow_counter_free(struct mlxsw_sp
*mlxsw_sp
,
222 unsigned int counter_index
)
224 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
228 static void mlxsw_sp_txhdr_construct(struct sk_buff
*skb
,
229 const struct mlxsw_tx_info
*tx_info
)
231 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
233 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
235 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_1
);
236 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
237 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
238 mlxsw_tx_hdr_swid_set(txhdr
, 0);
239 mlxsw_tx_hdr_control_tclass_set(txhdr
, 1);
240 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
241 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
244 enum mlxsw_reg_spms_state
mlxsw_sp_stp_spms_state(u8 state
)
247 case BR_STATE_FORWARDING
:
248 return MLXSW_REG_SPMS_STATE_FORWARDING
;
249 case BR_STATE_LEARNING
:
250 return MLXSW_REG_SPMS_STATE_LEARNING
;
251 case BR_STATE_LISTENING
:
252 case BR_STATE_DISABLED
:
253 case BR_STATE_BLOCKING
:
254 return MLXSW_REG_SPMS_STATE_DISCARDING
;
260 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
263 enum mlxsw_reg_spms_state spms_state
= mlxsw_sp_stp_spms_state(state
);
264 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
268 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
271 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
272 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
274 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
279 static int mlxsw_sp_base_mac_get(struct mlxsw_sp
*mlxsw_sp
)
281 char spad_pl
[MLXSW_REG_SPAD_LEN
] = {0};
284 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(spad
), spad_pl
);
287 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sp
->base_mac
);
291 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
294 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
295 char paos_pl
[MLXSW_REG_PAOS_LEN
];
297 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
,
298 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
299 MLXSW_PORT_ADMIN_STATUS_DOWN
);
300 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
303 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
306 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
307 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
309 mlxsw_reg_ppad_pack(ppad_pl
, true, mlxsw_sp_port
->local_port
);
310 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl
, addr
);
311 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ppad
), ppad_pl
);
314 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
316 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
317 unsigned char *addr
= mlxsw_sp_port
->dev
->dev_addr
;
319 ether_addr_copy(addr
, mlxsw_sp
->base_mac
);
320 addr
[ETH_ALEN
- 1] += mlxsw_sp_port
->local_port
;
321 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
);
324 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port
*mlxsw_sp_port
, int *p_max_mtu
)
326 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
327 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
330 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, 0);
331 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
335 *p_max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
339 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mtu
)
341 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
342 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
344 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
345 if (mtu
> mlxsw_sp_port
->max_mtu
)
348 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, mtu
);
349 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
352 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 swid
)
354 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
355 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
357 mlxsw_reg_pspa_pack(pspa_pl
, swid
, mlxsw_sp_port
->local_port
);
358 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pspa
), pspa_pl
);
361 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
, bool enable
)
363 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
364 char svpe_pl
[MLXSW_REG_SVPE_LEN
];
366 mlxsw_reg_svpe_pack(svpe_pl
, mlxsw_sp_port
->local_port
, enable
);
367 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svpe
), svpe_pl
);
370 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
373 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
377 spvmlr_pl
= kmalloc(MLXSW_REG_SPVMLR_LEN
, GFP_KERNEL
);
380 mlxsw_reg_spvmlr_pack(spvmlr_pl
, mlxsw_sp_port
->local_port
, vid
, vid
,
382 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvmlr
), spvmlr_pl
);
387 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype
, u8
*p_sver_type
)
403 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
404 u16 vid
, u16 ethtype
)
406 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
407 char spvid_pl
[MLXSW_REG_SPVID_LEN
];
411 err
= mlxsw_sp_ethtype_to_sver_type(ethtype
, &sver_type
);
415 mlxsw_reg_spvid_pack(spvid_pl
, mlxsw_sp_port
->local_port
, vid
,
418 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvid
), spvid_pl
);
421 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
424 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
425 char spaft_pl
[MLXSW_REG_SPAFT_LEN
];
427 mlxsw_reg_spaft_pack(spaft_pl
, mlxsw_sp_port
->local_port
, allow
);
428 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spaft
), spaft_pl
);
431 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
437 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, false);
441 err
= __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, vid
, ethtype
);
444 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, true);
446 goto err_port_allow_untagged_set
;
449 mlxsw_sp_port
->pvid
= vid
;
452 err_port_allow_untagged_set
:
453 __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, mlxsw_sp_port
->pvid
, ethtype
);
458 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
460 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
461 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
463 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sp_port
->local_port
);
464 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sspr
), sspr_pl
);
468 mlxsw_sp_port_module_info_get(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
469 struct mlxsw_sp_port_mapping
*port_mapping
)
471 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
478 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
479 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
482 module
= mlxsw_reg_pmlp_module_get(pmlp_pl
, 0);
483 width
= mlxsw_reg_pmlp_width_get(pmlp_pl
);
484 separate_rxtx
= mlxsw_reg_pmlp_rxtx_get(pmlp_pl
);
486 if (width
&& !is_power_of_2(width
)) {
487 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: width value is not power of 2\n",
492 for (i
= 0; i
< width
; i
++) {
493 if (mlxsw_reg_pmlp_module_get(pmlp_pl
, i
) != module
) {
494 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: contains multiple modules\n",
499 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, i
) !=
500 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl
, i
)) {
501 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
505 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, i
) != i
) {
506 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
512 port_mapping
->module
= module
;
513 port_mapping
->width
= width
;
514 port_mapping
->lane
= mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, 0);
518 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port
*mlxsw_sp_port
)
520 struct mlxsw_sp_port_mapping
*port_mapping
= &mlxsw_sp_port
->mapping
;
521 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
522 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
525 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sp_port
->local_port
);
526 mlxsw_reg_pmlp_width_set(pmlp_pl
, port_mapping
->width
);
527 for (i
= 0; i
< port_mapping
->width
; i
++) {
528 mlxsw_reg_pmlp_module_set(pmlp_pl
, i
, port_mapping
->module
);
529 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl
, i
, port_mapping
->lane
+ i
); /* Rx & Tx */
532 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
535 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port
*mlxsw_sp_port
)
537 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
538 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
540 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sp_port
->local_port
);
541 mlxsw_reg_pmlp_width_set(pmlp_pl
, 0);
542 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
545 static int mlxsw_sp_port_open(struct net_device
*dev
)
547 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
550 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
553 netif_start_queue(dev
);
557 static int mlxsw_sp_port_stop(struct net_device
*dev
)
559 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
561 netif_stop_queue(dev
);
562 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
565 static netdev_tx_t
mlxsw_sp_port_xmit(struct sk_buff
*skb
,
566 struct net_device
*dev
)
568 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
569 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
570 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
571 const struct mlxsw_tx_info tx_info
= {
572 .local_port
= mlxsw_sp_port
->local_port
,
578 if (skb_cow_head(skb
, MLXSW_TXHDR_LEN
)) {
579 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
580 dev_kfree_skb_any(skb
);
584 memset(skb
->cb
, 0, sizeof(struct mlxsw_skb_cb
));
586 if (mlxsw_core_skb_transmit_busy(mlxsw_sp
->core
, &tx_info
))
587 return NETDEV_TX_BUSY
;
589 if (eth_skb_pad(skb
)) {
590 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
594 mlxsw_sp_txhdr_construct(skb
, &tx_info
);
595 /* TX header is consumed by HW on the way so we shouldn't count its
596 * bytes as being sent.
598 len
= skb
->len
- MLXSW_TXHDR_LEN
;
600 /* Due to a race we might fail here because of a full queue. In that
601 * unlikely case we simply drop the packet.
603 err
= mlxsw_core_skb_transmit(mlxsw_sp
->core
, skb
, &tx_info
);
606 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
607 u64_stats_update_begin(&pcpu_stats
->syncp
);
608 pcpu_stats
->tx_packets
++;
609 pcpu_stats
->tx_bytes
+= len
;
610 u64_stats_update_end(&pcpu_stats
->syncp
);
612 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
613 dev_kfree_skb_any(skb
);
/* ndo_set_rx_mode: intentionally empty — RX filtering is handled by the
 * switch hardware, not the host.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
622 static int mlxsw_sp_port_set_mac_address(struct net_device
*dev
, void *p
)
624 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
625 struct sockaddr
*addr
= p
;
628 if (!is_valid_ether_addr(addr
->sa_data
))
629 return -EADDRNOTAVAIL
;
631 err
= mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
->sa_data
);
634 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
638 static int mlxsw_sp_port_change_mtu(struct net_device
*dev
, int mtu
)
640 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
641 struct mlxsw_sp_hdroom orig_hdroom
;
642 struct mlxsw_sp_hdroom hdroom
;
645 orig_hdroom
= *mlxsw_sp_port
->hdroom
;
647 hdroom
= orig_hdroom
;
649 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port
, &hdroom
);
651 err
= mlxsw_sp_hdroom_configure(mlxsw_sp_port
, &hdroom
);
653 netdev_err(dev
, "Failed to configure port's headroom\n");
657 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
659 goto err_port_mtu_set
;
664 mlxsw_sp_hdroom_configure(mlxsw_sp_port
, &orig_hdroom
);
669 mlxsw_sp_port_get_sw_stats64(const struct net_device
*dev
,
670 struct rtnl_link_stats64
*stats
)
672 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
673 struct mlxsw_sp_port_pcpu_stats
*p
;
674 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
679 for_each_possible_cpu(i
) {
680 p
= per_cpu_ptr(mlxsw_sp_port
->pcpu_stats
, i
);
682 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
683 rx_packets
= p
->rx_packets
;
684 rx_bytes
= p
->rx_bytes
;
685 tx_packets
= p
->tx_packets
;
686 tx_bytes
= p
->tx_bytes
;
687 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
689 stats
->rx_packets
+= rx_packets
;
690 stats
->rx_bytes
+= rx_bytes
;
691 stats
->tx_packets
+= tx_packets
;
692 stats
->tx_bytes
+= tx_bytes
;
693 /* tx_dropped is u32, updated without syncp protection. */
694 tx_dropped
+= p
->tx_dropped
;
696 stats
->tx_dropped
= tx_dropped
;
700 static bool mlxsw_sp_port_has_offload_stats(const struct net_device
*dev
, int attr_id
)
703 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
710 static int mlxsw_sp_port_get_offload_stats(int attr_id
, const struct net_device
*dev
,
714 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
715 return mlxsw_sp_port_get_sw_stats64(dev
, sp
);
721 int mlxsw_sp_port_get_stats_raw(struct net_device
*dev
, int grp
,
722 int prio
, char *ppcnt_pl
)
724 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
725 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
727 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sp_port
->local_port
, grp
, prio
);
728 return mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
731 static int mlxsw_sp_port_get_hw_stats(struct net_device
*dev
,
732 struct rtnl_link_stats64
*stats
)
734 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
737 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
,
743 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl
);
745 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl
);
747 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl
);
749 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl
);
751 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl
);
753 stats
->rx_crc_errors
=
754 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl
);
755 stats
->rx_frame_errors
=
756 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl
);
758 stats
->rx_length_errors
= (
759 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl
) +
760 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl
) +
761 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl
));
763 stats
->rx_errors
= (stats
->rx_crc_errors
+
764 stats
->rx_frame_errors
+ stats
->rx_length_errors
);
771 mlxsw_sp_port_get_hw_xstats(struct net_device
*dev
,
772 struct mlxsw_sp_port_xstats
*xstats
)
774 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
777 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_EXT_CNT
, 0,
780 xstats
->ecn
= mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl
);
782 for (i
= 0; i
< TC_MAX_QUEUE
; i
++) {
783 err
= mlxsw_sp_port_get_stats_raw(dev
,
784 MLXSW_REG_PPCNT_TC_CONG_TC
,
787 xstats
->wred_drop
[i
] =
788 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl
);
790 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_TC_CNT
,
796 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl
);
797 xstats
->tail_drop
[i
] =
798 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl
);
801 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
802 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_PRIO_CNT
,
807 xstats
->tx_packets
[i
] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl
);
808 xstats
->tx_bytes
[i
] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl
);
812 static void update_stats_cache(struct work_struct
*work
)
814 struct mlxsw_sp_port
*mlxsw_sp_port
=
815 container_of(work
, struct mlxsw_sp_port
,
816 periodic_hw_stats
.update_dw
.work
);
818 if (!netif_carrier_ok(mlxsw_sp_port
->dev
))
819 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
820 * necessary when port goes down.
824 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port
->dev
,
825 &mlxsw_sp_port
->periodic_hw_stats
.stats
);
826 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port
->dev
,
827 &mlxsw_sp_port
->periodic_hw_stats
.xstats
);
830 mlxsw_core_schedule_dw(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
,
831 MLXSW_HW_STATS_UPDATE_TIME
);
834 /* Return the stats from a cache that is updated periodically,
835 * as this function might get called in an atomic context.
838 mlxsw_sp_port_get_stats64(struct net_device
*dev
,
839 struct rtnl_link_stats64
*stats
)
841 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
843 memcpy(stats
, &mlxsw_sp_port
->periodic_hw_stats
.stats
, sizeof(*stats
));
846 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
847 u16 vid_begin
, u16 vid_end
,
848 bool is_member
, bool untagged
)
850 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
854 spvm_pl
= kmalloc(MLXSW_REG_SPVM_LEN
, GFP_KERNEL
);
858 mlxsw_reg_spvm_pack(spvm_pl
, mlxsw_sp_port
->local_port
, vid_begin
,
859 vid_end
, is_member
, untagged
);
860 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvm
), spvm_pl
);
865 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid_begin
,
866 u16 vid_end
, bool is_member
, bool untagged
)
871 for (vid
= vid_begin
; vid
<= vid_end
;
872 vid
+= MLXSW_REG_SPVM_REC_MAX_COUNT
) {
873 vid_e
= min((u16
) (vid
+ MLXSW_REG_SPVM_REC_MAX_COUNT
- 1),
876 err
= __mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid_e
,
877 is_member
, untagged
);
885 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port
*mlxsw_sp_port
,
888 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
, *tmp
;
890 list_for_each_entry_safe(mlxsw_sp_port_vlan
, tmp
,
891 &mlxsw_sp_port
->vlans_list
, list
) {
892 if (!flush_default
&&
893 mlxsw_sp_port_vlan
->vid
== MLXSW_SP_DEFAULT_VID
)
895 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
900 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
902 if (mlxsw_sp_port_vlan
->bridge_port
)
903 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan
);
904 else if (mlxsw_sp_port_vlan
->fid
)
905 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan
);
908 struct mlxsw_sp_port_vlan
*
909 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
911 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
912 bool untagged
= vid
== MLXSW_SP_DEFAULT_VID
;
915 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
916 if (mlxsw_sp_port_vlan
)
917 return ERR_PTR(-EEXIST
);
919 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, true, untagged
);
923 mlxsw_sp_port_vlan
= kzalloc(sizeof(*mlxsw_sp_port_vlan
), GFP_KERNEL
);
924 if (!mlxsw_sp_port_vlan
) {
926 goto err_port_vlan_alloc
;
929 mlxsw_sp_port_vlan
->mlxsw_sp_port
= mlxsw_sp_port
;
930 mlxsw_sp_port_vlan
->vid
= vid
;
931 list_add(&mlxsw_sp_port_vlan
->list
, &mlxsw_sp_port
->vlans_list
);
933 return mlxsw_sp_port_vlan
;
936 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
940 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
942 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
943 u16 vid
= mlxsw_sp_port_vlan
->vid
;
945 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan
);
946 list_del(&mlxsw_sp_port_vlan
->list
);
947 kfree(mlxsw_sp_port_vlan
);
948 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
951 static int mlxsw_sp_port_add_vid(struct net_device
*dev
,
952 __be16 __always_unused proto
, u16 vid
)
954 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
956 /* VLAN 0 is added to HW filter when device goes up, but it is
957 * reserved in our case, so simply return.
962 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port
, vid
));
965 static int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
966 __be16 __always_unused proto
, u16 vid
)
968 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
969 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
971 /* VLAN 0 is removed from HW filter when device goes down, but
972 * it is reserved in our case, so simply return.
977 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
978 if (!mlxsw_sp_port_vlan
)
980 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
985 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port
*mlxsw_sp_port
,
986 struct flow_block_offload
*f
)
988 switch (f
->binder_type
) {
989 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS
:
990 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port
, f
, true);
991 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS
:
992 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port
, f
, false);
993 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP
:
994 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port
, f
);
1000 static int mlxsw_sp_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1003 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1006 case TC_SETUP_BLOCK
:
1007 return mlxsw_sp_setup_tc_block(mlxsw_sp_port
, type_data
);
1008 case TC_SETUP_QDISC_RED
:
1009 return mlxsw_sp_setup_tc_red(mlxsw_sp_port
, type_data
);
1010 case TC_SETUP_QDISC_PRIO
:
1011 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port
, type_data
);
1012 case TC_SETUP_QDISC_ETS
:
1013 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port
, type_data
);
1014 case TC_SETUP_QDISC_TBF
:
1015 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port
, type_data
);
1016 case TC_SETUP_QDISC_FIFO
:
1017 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port
, type_data
);
1023 static int mlxsw_sp_feature_hw_tc(struct net_device
*dev
, bool enable
)
1025 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1028 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port
->ing_flow_block
) ||
1029 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port
->eg_flow_block
)) {
1030 netdev_err(dev
, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1033 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port
->ing_flow_block
);
1034 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port
->eg_flow_block
);
1036 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port
->ing_flow_block
);
1037 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port
->eg_flow_block
);
1042 static int mlxsw_sp_feature_loopback(struct net_device
*dev
, bool enable
)
1044 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1045 char pplr_pl
[MLXSW_REG_PPLR_LEN
];
1048 if (netif_running(dev
))
1049 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1051 mlxsw_reg_pplr_pack(pplr_pl
, mlxsw_sp_port
->local_port
, enable
);
1052 err
= mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pplr
),
1055 if (netif_running(dev
))
1056 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
1061 typedef int (*mlxsw_sp_feature_handler
)(struct net_device
*dev
, bool enable
);
1063 static int mlxsw_sp_handle_feature(struct net_device
*dev
,
1064 netdev_features_t wanted_features
,
1065 netdev_features_t feature
,
1066 mlxsw_sp_feature_handler feature_handler
)
1068 netdev_features_t changes
= wanted_features
^ dev
->features
;
1069 bool enable
= !!(wanted_features
& feature
);
1072 if (!(changes
& feature
))
1075 err
= feature_handler(dev
, enable
);
1077 netdev_err(dev
, "%s feature %pNF failed, err %d\n",
1078 enable
? "Enable" : "Disable", &feature
, err
);
1083 dev
->features
|= feature
;
1085 dev
->features
&= ~feature
;
1089 static int mlxsw_sp_set_features(struct net_device
*dev
,
1090 netdev_features_t features
)
1092 netdev_features_t oper_features
= dev
->features
;
1095 err
|= mlxsw_sp_handle_feature(dev
, features
, NETIF_F_HW_TC
,
1096 mlxsw_sp_feature_hw_tc
);
1097 err
|= mlxsw_sp_handle_feature(dev
, features
, NETIF_F_LOOPBACK
,
1098 mlxsw_sp_feature_loopback
);
1101 dev
->features
= oper_features
;
1108 static struct devlink_port
*
1109 mlxsw_sp_port_get_devlink_port(struct net_device
*dev
)
1111 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1112 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1114 return mlxsw_core_port_devlink_port_get(mlxsw_sp
->core
,
1115 mlxsw_sp_port
->local_port
);
1118 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1121 struct hwtstamp_config config
;
1124 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
1127 err
= mlxsw_sp_port
->mlxsw_sp
->ptp_ops
->hwtstamp_set(mlxsw_sp_port
,
1132 if (copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)))
1138 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port
*mlxsw_sp_port
,
1141 struct hwtstamp_config config
;
1144 err
= mlxsw_sp_port
->mlxsw_sp
->ptp_ops
->hwtstamp_get(mlxsw_sp_port
,
1149 if (copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)))
1155 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port
*mlxsw_sp_port
)
1157 struct hwtstamp_config config
= {0};
1159 mlxsw_sp_port
->mlxsw_sp
->ptp_ops
->hwtstamp_set(mlxsw_sp_port
, &config
);
1163 mlxsw_sp_port_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
1165 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1169 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port
, ifr
);
1171 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port
, ifr
);
1177 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
1178 .ndo_open
= mlxsw_sp_port_open
,
1179 .ndo_stop
= mlxsw_sp_port_stop
,
1180 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1181 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1182 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1183 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1184 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1185 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1186 .ndo_has_offload_stats
= mlxsw_sp_port_has_offload_stats
,
1187 .ndo_get_offload_stats
= mlxsw_sp_port_get_offload_stats
,
1188 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1189 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1190 .ndo_set_features
= mlxsw_sp_set_features
,
1191 .ndo_get_devlink_port
= mlxsw_sp_port_get_devlink_port
,
1192 .ndo_do_ioctl
= mlxsw_sp_port_ioctl
,
1196 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
1198 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1199 u32 eth_proto_cap
, eth_proto_admin
, eth_proto_oper
;
1200 const struct mlxsw_sp_port_type_speed_ops
*ops
;
1201 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1202 u32 eth_proto_cap_masked
;
1205 ops
= mlxsw_sp
->port_type_speed_ops
;
1207 /* Set advertised speeds to speeds supported by both the driver
1210 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
1212 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1216 ops
->reg_ptys_eth_unpack(mlxsw_sp
, ptys_pl
, ð_proto_cap
,
1217 ð_proto_admin
, ð_proto_oper
);
1218 eth_proto_cap_masked
= ops
->ptys_proto_cap_masked_get(eth_proto_cap
);
1219 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
1220 eth_proto_cap_masked
,
1221 mlxsw_sp_port
->link
.autoneg
);
1222 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1225 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port
*mlxsw_sp_port
, u32
*speed
)
1227 const struct mlxsw_sp_port_type_speed_ops
*port_type_speed_ops
;
1228 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1229 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1233 port_type_speed_ops
= mlxsw_sp
->port_type_speed_ops
;
1234 port_type_speed_ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
,
1235 mlxsw_sp_port
->local_port
, 0,
1237 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1240 port_type_speed_ops
->reg_ptys_eth_unpack(mlxsw_sp
, ptys_pl
, NULL
, NULL
,
1242 *speed
= port_type_speed_ops
->from_ptys_speed(mlxsw_sp
, eth_proto_oper
);
1246 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1247 enum mlxsw_reg_qeec_hr hr
, u8 index
, u8 next_index
,
1248 bool dwrr
, u8 dwrr_weight
)
1250 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1251 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1253 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
1255 mlxsw_reg_qeec_de_set(qeec_pl
, true);
1256 mlxsw_reg_qeec_dwrr_set(qeec_pl
, dwrr
);
1257 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl
, dwrr_weight
);
1258 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
1261 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1262 enum mlxsw_reg_qeec_hr hr
, u8 index
,
1263 u8 next_index
, u32 maxrate
, u8 burst_size
)
1265 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1266 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1268 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
1270 mlxsw_reg_qeec_mase_set(qeec_pl
, true);
1271 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl
, maxrate
);
1272 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl
, burst_size
);
1273 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
1276 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1277 enum mlxsw_reg_qeec_hr hr
, u8 index
,
1278 u8 next_index
, u32 minrate
)
1280 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1281 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1283 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
1285 mlxsw_reg_qeec_mise_set(qeec_pl
, true);
1286 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl
, minrate
);
1288 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
1291 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1292 u8 switch_prio
, u8 tclass
)
1294 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1295 char qtct_pl
[MLXSW_REG_QTCT_LEN
];
1297 mlxsw_reg_qtct_pack(qtct_pl
, mlxsw_sp_port
->local_port
, switch_prio
,
1299 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtct
), qtct_pl
);
1302 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
1306 /* Setup the elements hierarcy, so that each TC is linked to
1307 * one subgroup, which are all member in the same group.
1309 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1310 MLXSW_REG_QEEC_HR_GROUP
, 0, 0, false, 0);
1313 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1314 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1315 MLXSW_REG_QEEC_HR_SUBGROUP
, i
,
1320 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1321 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1322 MLXSW_REG_QEEC_HR_TC
, i
, i
,
1327 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1328 MLXSW_REG_QEEC_HR_TC
,
1335 /* Make sure the max shaper is disabled in all hierarchies that support
1336 * it. Note that this disables ptps (PTP shaper), but that is intended
1337 * for the initial configuration.
1339 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1340 MLXSW_REG_QEEC_HR_PORT
, 0, 0,
1341 MLXSW_REG_QEEC_MAS_DIS
, 0);
1344 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1345 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1346 MLXSW_REG_QEEC_HR_SUBGROUP
,
1348 MLXSW_REG_QEEC_MAS_DIS
, 0);
1352 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1353 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1354 MLXSW_REG_QEEC_HR_TC
,
1356 MLXSW_REG_QEEC_MAS_DIS
, 0);
1360 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1361 MLXSW_REG_QEEC_HR_TC
,
1363 MLXSW_REG_QEEC_MAS_DIS
, 0);
1368 /* Configure the min shaper for multicast TCs. */
1369 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1370 err
= mlxsw_sp_port_min_bw_set(mlxsw_sp_port
,
1371 MLXSW_REG_QEEC_HR_TC
,
1373 MLXSW_REG_QEEC_MIS_MIN
);
1378 /* Map all priorities to traffic class 0. */
1379 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1380 err
= mlxsw_sp_port_prio_tc_set(mlxsw_sp_port
, i
, 0);
1388 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1391 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1392 char qtctm_pl
[MLXSW_REG_QTCTM_LEN
];
1394 mlxsw_reg_qtctm_pack(qtctm_pl
, mlxsw_sp_port
->local_port
, enable
);
1395 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtctm
), qtctm_pl
);
1398 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
1400 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1401 u8 module
= mlxsw_sp_port
->mapping
.module
;
1402 u64 overheat_counter
;
1405 err
= mlxsw_env_module_overheat_counter_get(mlxsw_sp
->core
, module
,
1410 mlxsw_sp_port
->module_overheat_initial_val
= overheat_counter
;
1415 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1416 bool is_8021ad_tagged
,
1417 bool is_8021q_tagged
)
1419 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1420 char spvc_pl
[MLXSW_REG_SPVC_LEN
];
1422 mlxsw_reg_spvc_pack(spvc_pl
, mlxsw_sp_port
->local_port
,
1423 is_8021ad_tagged
, is_8021q_tagged
);
1424 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvc
), spvc_pl
);
1427 static int mlxsw_sp_port_create(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
1428 u8 split_base_local_port
,
1429 struct mlxsw_sp_port_mapping
*port_mapping
)
1431 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1432 bool split
= !!split_base_local_port
;
1433 struct mlxsw_sp_port
*mlxsw_sp_port
;
1434 u32 lanes
= port_mapping
->width
;
1435 struct net_device
*dev
;
1439 splittable
= lanes
> 1 && !split
;
1440 err
= mlxsw_core_port_init(mlxsw_sp
->core
, local_port
,
1441 port_mapping
->module
+ 1, split
,
1442 port_mapping
->lane
/ lanes
,
1445 sizeof(mlxsw_sp
->base_mac
));
1447 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to init core port\n",
1452 dev
= alloc_etherdev(sizeof(struct mlxsw_sp_port
));
1455 goto err_alloc_etherdev
;
1457 SET_NETDEV_DEV(dev
, mlxsw_sp
->bus_info
->dev
);
1458 dev_net_set(dev
, mlxsw_sp_net(mlxsw_sp
));
1459 mlxsw_sp_port
= netdev_priv(dev
);
1460 mlxsw_sp_port
->dev
= dev
;
1461 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
1462 mlxsw_sp_port
->local_port
= local_port
;
1463 mlxsw_sp_port
->pvid
= MLXSW_SP_DEFAULT_VID
;
1464 mlxsw_sp_port
->split
= split
;
1465 mlxsw_sp_port
->split_base_local_port
= split_base_local_port
;
1466 mlxsw_sp_port
->mapping
= *port_mapping
;
1467 mlxsw_sp_port
->link
.autoneg
= 1;
1468 INIT_LIST_HEAD(&mlxsw_sp_port
->vlans_list
);
1470 mlxsw_sp_port
->pcpu_stats
=
1471 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats
);
1472 if (!mlxsw_sp_port
->pcpu_stats
) {
1474 goto err_alloc_stats
;
1477 INIT_DELAYED_WORK(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
,
1478 &update_stats_cache
);
1480 dev
->netdev_ops
= &mlxsw_sp_port_netdev_ops
;
1481 dev
->ethtool_ops
= &mlxsw_sp_port_ethtool_ops
;
1483 err
= mlxsw_sp_port_module_map(mlxsw_sp_port
);
1485 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to map module\n",
1486 mlxsw_sp_port
->local_port
);
1487 goto err_port_module_map
;
1490 err
= mlxsw_sp_port_swid_set(mlxsw_sp_port
, 0);
1492 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set SWID\n",
1493 mlxsw_sp_port
->local_port
);
1494 goto err_port_swid_set
;
1497 err
= mlxsw_sp_port_dev_addr_init(mlxsw_sp_port
);
1499 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unable to init port mac address\n",
1500 mlxsw_sp_port
->local_port
);
1501 goto err_dev_addr_init
;
1504 netif_carrier_off(dev
);
1506 dev
->features
|= NETIF_F_NETNS_LOCAL
| NETIF_F_LLTX
| NETIF_F_SG
|
1507 NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_TC
;
1508 dev
->hw_features
|= NETIF_F_HW_TC
| NETIF_F_LOOPBACK
;
1511 dev
->max_mtu
= ETH_MAX_MTU
;
1513 /* Each packet needs to have a Tx header (metadata) on top all other
1516 dev
->needed_headroom
= MLXSW_TXHDR_LEN
;
1518 err
= mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port
);
1520 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set system port mapping\n",
1521 mlxsw_sp_port
->local_port
);
1522 goto err_port_system_port_mapping_set
;
1525 err
= mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port
);
1527 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to enable speeds\n",
1528 mlxsw_sp_port
->local_port
);
1529 goto err_port_speed_by_width_set
;
1532 err
= mlxsw_sp
->port_type_speed_ops
->ptys_max_speed(mlxsw_sp_port
,
1533 &mlxsw_sp_port
->max_speed
);
1535 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to get maximum speed\n",
1536 mlxsw_sp_port
->local_port
);
1537 goto err_max_speed_get
;
1540 err
= mlxsw_sp_port_max_mtu_get(mlxsw_sp_port
, &mlxsw_sp_port
->max_mtu
);
1542 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to get maximum MTU\n",
1543 mlxsw_sp_port
->local_port
);
1544 goto err_port_max_mtu_get
;
1547 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, ETH_DATA_LEN
);
1549 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set MTU\n",
1550 mlxsw_sp_port
->local_port
);
1551 goto err_port_mtu_set
;
1554 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1556 goto err_port_admin_status_set
;
1558 err
= mlxsw_sp_port_buffers_init(mlxsw_sp_port
);
1560 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize buffers\n",
1561 mlxsw_sp_port
->local_port
);
1562 goto err_port_buffers_init
;
1565 err
= mlxsw_sp_port_ets_init(mlxsw_sp_port
);
1567 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize ETS\n",
1568 mlxsw_sp_port
->local_port
);
1569 goto err_port_ets_init
;
1572 err
= mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, true);
1574 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize TC MC mode\n",
1575 mlxsw_sp_port
->local_port
);
1576 goto err_port_tc_mc_mode
;
1579 /* ETS and buffers must be initialized before DCB. */
1580 err
= mlxsw_sp_port_dcb_init(mlxsw_sp_port
);
1582 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize DCB\n",
1583 mlxsw_sp_port
->local_port
);
1584 goto err_port_dcb_init
;
1587 err
= mlxsw_sp_port_fids_init(mlxsw_sp_port
);
1589 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize FIDs\n",
1590 mlxsw_sp_port
->local_port
);
1591 goto err_port_fids_init
;
1594 err
= mlxsw_sp_tc_qdisc_init(mlxsw_sp_port
);
1596 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize TC qdiscs\n",
1597 mlxsw_sp_port
->local_port
);
1598 goto err_port_qdiscs_init
;
1601 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 0, VLAN_N_VID
- 1, false,
1604 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to clear VLAN filter\n",
1605 mlxsw_sp_port
->local_port
);
1606 goto err_port_vlan_clear
;
1609 err
= mlxsw_sp_port_nve_init(mlxsw_sp_port
);
1611 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize NVE\n",
1612 mlxsw_sp_port
->local_port
);
1613 goto err_port_nve_init
;
1616 err
= mlxsw_sp_port_pvid_set(mlxsw_sp_port
, MLXSW_SP_DEFAULT_VID
,
1619 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set PVID\n",
1620 mlxsw_sp_port
->local_port
);
1621 goto err_port_pvid_set
;
1624 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_create(mlxsw_sp_port
,
1625 MLXSW_SP_DEFAULT_VID
);
1626 if (IS_ERR(mlxsw_sp_port_vlan
)) {
1627 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to create VID 1\n",
1628 mlxsw_sp_port
->local_port
);
1629 err
= PTR_ERR(mlxsw_sp_port_vlan
);
1630 goto err_port_vlan_create
;
1632 mlxsw_sp_port
->default_vlan
= mlxsw_sp_port_vlan
;
1634 /* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
1635 * only packets with 802.1q header as tagged packets.
1637 err
= mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port
, false, true);
1639 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set default VLAN classification\n",
1641 goto err_port_vlan_classification_set
;
1644 INIT_DELAYED_WORK(&mlxsw_sp_port
->ptp
.shaper_dw
,
1645 mlxsw_sp
->ptp_ops
->shaper_work
);
1647 mlxsw_sp
->ports
[local_port
] = mlxsw_sp_port
;
1649 err
= mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port
);
1651 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set overheat initial value\n",
1652 mlxsw_sp_port
->local_port
);
1653 goto err_port_overheat_init_val_set
;
1656 err
= register_netdev(dev
);
1658 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to register netdev\n",
1659 mlxsw_sp_port
->local_port
);
1660 goto err_register_netdev
;
1663 mlxsw_core_port_eth_set(mlxsw_sp
->core
, mlxsw_sp_port
->local_port
,
1664 mlxsw_sp_port
, dev
);
1665 mlxsw_core_schedule_dw(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
, 0);
1668 err_register_netdev
:
1669 err_port_overheat_init_val_set
:
1670 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port
, true, true);
1671 err_port_vlan_classification_set
:
1672 mlxsw_sp
->ports
[local_port
] = NULL
;
1673 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
1674 err_port_vlan_create
:
1676 mlxsw_sp_port_nve_fini(mlxsw_sp_port
);
1678 err_port_vlan_clear
:
1679 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port
);
1680 err_port_qdiscs_init
:
1681 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
1683 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
1685 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, false);
1686 err_port_tc_mc_mode
:
1688 mlxsw_sp_port_buffers_fini(mlxsw_sp_port
);
1689 err_port_buffers_init
:
1690 err_port_admin_status_set
:
1692 err_port_max_mtu_get
:
1694 err_port_speed_by_width_set
:
1695 err_port_system_port_mapping_set
:
1697 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
1699 mlxsw_sp_port_module_unmap(mlxsw_sp_port
);
1700 err_port_module_map
:
1701 free_percpu(mlxsw_sp_port
->pcpu_stats
);
1705 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
1709 static void mlxsw_sp_port_remove(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
1711 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
1713 cancel_delayed_work_sync(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
);
1714 cancel_delayed_work_sync(&mlxsw_sp_port
->ptp
.shaper_dw
);
1715 mlxsw_sp_port_ptp_clear(mlxsw_sp_port
);
1716 mlxsw_core_port_clear(mlxsw_sp
->core
, local_port
, mlxsw_sp
);
1717 unregister_netdev(mlxsw_sp_port
->dev
); /* This calls ndo_stop */
1718 mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port
, true, true);
1719 mlxsw_sp
->ports
[local_port
] = NULL
;
1720 mlxsw_sp_port_vlan_flush(mlxsw_sp_port
, true);
1721 mlxsw_sp_port_nve_fini(mlxsw_sp_port
);
1722 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port
);
1723 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
1724 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
1725 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, false);
1726 mlxsw_sp_port_buffers_fini(mlxsw_sp_port
);
1727 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
1728 mlxsw_sp_port_module_unmap(mlxsw_sp_port
);
1729 free_percpu(mlxsw_sp_port
->pcpu_stats
);
1730 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port
->vlans_list
));
1731 free_netdev(mlxsw_sp_port
->dev
);
1732 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
1735 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp
*mlxsw_sp
)
1737 struct mlxsw_sp_port
*mlxsw_sp_port
;
1740 mlxsw_sp_port
= kzalloc(sizeof(*mlxsw_sp_port
), GFP_KERNEL
);
1744 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
1745 mlxsw_sp_port
->local_port
= MLXSW_PORT_CPU_PORT
;
1747 err
= mlxsw_core_cpu_port_init(mlxsw_sp
->core
,
1750 sizeof(mlxsw_sp
->base_mac
));
1752 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize core CPU port\n");
1753 goto err_core_cpu_port_init
;
1756 mlxsw_sp
->ports
[MLXSW_PORT_CPU_PORT
] = mlxsw_sp_port
;
1759 err_core_cpu_port_init
:
1760 kfree(mlxsw_sp_port
);
1764 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp
*mlxsw_sp
)
1766 struct mlxsw_sp_port
*mlxsw_sp_port
=
1767 mlxsw_sp
->ports
[MLXSW_PORT_CPU_PORT
];
1769 mlxsw_core_cpu_port_fini(mlxsw_sp
->core
);
1770 mlxsw_sp
->ports
[MLXSW_PORT_CPU_PORT
] = NULL
;
1771 kfree(mlxsw_sp_port
);
1774 static bool mlxsw_sp_port_created(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
1776 return mlxsw_sp
->ports
[local_port
] != NULL
;
1779 static void mlxsw_sp_ports_remove(struct mlxsw_sp
*mlxsw_sp
)
1783 for (i
= 1; i
< mlxsw_core_max_ports(mlxsw_sp
->core
); i
++)
1784 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
1785 mlxsw_sp_port_remove(mlxsw_sp
, i
);
1786 mlxsw_sp_cpu_port_remove(mlxsw_sp
);
1787 kfree(mlxsw_sp
->ports
);
1788 mlxsw_sp
->ports
= NULL
;
1791 static int mlxsw_sp_ports_create(struct mlxsw_sp
*mlxsw_sp
)
1793 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
1794 struct mlxsw_sp_port_mapping
*port_mapping
;
1799 alloc_size
= sizeof(struct mlxsw_sp_port
*) * max_ports
;
1800 mlxsw_sp
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
1801 if (!mlxsw_sp
->ports
)
1804 err
= mlxsw_sp_cpu_port_create(mlxsw_sp
);
1806 goto err_cpu_port_create
;
1808 for (i
= 1; i
< max_ports
; i
++) {
1809 port_mapping
= mlxsw_sp
->port_mapping
[i
];
1812 err
= mlxsw_sp_port_create(mlxsw_sp
, i
, 0, port_mapping
);
1814 goto err_port_create
;
1819 for (i
--; i
>= 1; i
--)
1820 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
1821 mlxsw_sp_port_remove(mlxsw_sp
, i
);
1822 mlxsw_sp_cpu_port_remove(mlxsw_sp
);
1823 err_cpu_port_create
:
1824 kfree(mlxsw_sp
->ports
);
1825 mlxsw_sp
->ports
= NULL
;
1829 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp
*mlxsw_sp
)
1831 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
1832 struct mlxsw_sp_port_mapping port_mapping
;
1836 mlxsw_sp
->port_mapping
= kcalloc(max_ports
,
1837 sizeof(struct mlxsw_sp_port_mapping
*),
1839 if (!mlxsw_sp
->port_mapping
)
1842 for (i
= 1; i
< max_ports
; i
++) {
1843 if (mlxsw_core_port_is_xm(mlxsw_sp
->core
, i
))
1846 err
= mlxsw_sp_port_module_info_get(mlxsw_sp
, i
, &port_mapping
);
1848 goto err_port_module_info_get
;
1849 if (!port_mapping
.width
)
1852 mlxsw_sp
->port_mapping
[i
] = kmemdup(&port_mapping
,
1853 sizeof(port_mapping
),
1855 if (!mlxsw_sp
->port_mapping
[i
]) {
1857 goto err_port_module_info_dup
;
1862 err_port_module_info_get
:
1863 err_port_module_info_dup
:
1864 for (i
--; i
>= 1; i
--)
1865 kfree(mlxsw_sp
->port_mapping
[i
]);
1866 kfree(mlxsw_sp
->port_mapping
);
1870 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp
*mlxsw_sp
)
1874 for (i
= 1; i
< mlxsw_core_max_ports(mlxsw_sp
->core
); i
++)
1875 kfree(mlxsw_sp
->port_mapping
[i
]);
1876 kfree(mlxsw_sp
->port_mapping
);
1879 static u8
mlxsw_sp_cluster_base_port_get(u8 local_port
, unsigned int max_width
)
1881 u8 offset
= (local_port
- 1) % max_width
;
1883 return local_port
- offset
;
1887 mlxsw_sp_port_split_create(struct mlxsw_sp
*mlxsw_sp
, u8 base_port
,
1888 struct mlxsw_sp_port_mapping
*port_mapping
,
1889 unsigned int count
, u8 offset
)
1891 struct mlxsw_sp_port_mapping split_port_mapping
;
1894 split_port_mapping
= *port_mapping
;
1895 split_port_mapping
.width
/= count
;
1896 for (i
= 0; i
< count
; i
++) {
1897 err
= mlxsw_sp_port_create(mlxsw_sp
, base_port
+ i
* offset
,
1898 base_port
, &split_port_mapping
);
1900 goto err_port_create
;
1901 split_port_mapping
.lane
+= split_port_mapping
.width
;
1907 for (i
--; i
>= 0; i
--)
1908 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
1909 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
1913 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp
*mlxsw_sp
,
1915 unsigned int count
, u8 offset
)
1917 struct mlxsw_sp_port_mapping
*port_mapping
;
1920 /* Go over original unsplit ports in the gap and recreate them. */
1921 for (i
= 0; i
< count
* offset
; i
++) {
1922 port_mapping
= mlxsw_sp
->port_mapping
[base_port
+ i
];
1925 mlxsw_sp_port_create(mlxsw_sp
, base_port
+ i
, 0, port_mapping
);
1929 static int mlxsw_sp_local_ports_offset(struct mlxsw_core
*mlxsw_core
,
1931 unsigned int max_width
)
1933 enum mlxsw_res_id local_ports_in_x_res_id
;
1934 int split_width
= max_width
/ count
;
1936 if (split_width
== 1)
1937 local_ports_in_x_res_id
= MLXSW_RES_ID_LOCAL_PORTS_IN_1X
;
1938 else if (split_width
== 2)
1939 local_ports_in_x_res_id
= MLXSW_RES_ID_LOCAL_PORTS_IN_2X
;
1940 else if (split_width
== 4)
1941 local_ports_in_x_res_id
= MLXSW_RES_ID_LOCAL_PORTS_IN_4X
;
1945 if (!mlxsw_core_res_valid(mlxsw_core
, local_ports_in_x_res_id
))
1947 return mlxsw_core_res_get(mlxsw_core
, local_ports_in_x_res_id
);
1950 static struct mlxsw_sp_port
*
1951 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
1953 if (mlxsw_sp
->ports
&& mlxsw_sp
->ports
[local_port
])
1954 return mlxsw_sp
->ports
[local_port
];
1958 static int mlxsw_sp_port_split(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
1960 struct netlink_ext_ack
*extack
)
1962 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
1963 struct mlxsw_sp_port_mapping port_mapping
;
1964 struct mlxsw_sp_port
*mlxsw_sp_port
;
1971 mlxsw_sp_port
= mlxsw_sp_port_get_by_local_port(mlxsw_sp
, local_port
);
1972 if (!mlxsw_sp_port
) {
1973 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
1975 NL_SET_ERR_MSG_MOD(extack
, "Port number does not exist");
1979 max_width
= mlxsw_core_module_max_width(mlxsw_core
,
1980 mlxsw_sp_port
->mapping
.module
);
1981 if (max_width
< 0) {
1982 netdev_err(mlxsw_sp_port
->dev
, "Cannot get max width of port module\n");
1983 NL_SET_ERR_MSG_MOD(extack
, "Cannot get max width of port module");
1987 /* Split port with non-max cannot be split. */
1988 if (mlxsw_sp_port
->mapping
.width
!= max_width
) {
1989 netdev_err(mlxsw_sp_port
->dev
, "Port cannot be split\n");
1990 NL_SET_ERR_MSG_MOD(extack
, "Port cannot be split");
1994 offset
= mlxsw_sp_local_ports_offset(mlxsw_core
, count
, max_width
);
1996 netdev_err(mlxsw_sp_port
->dev
, "Cannot obtain local port offset\n");
1997 NL_SET_ERR_MSG_MOD(extack
, "Cannot obtain local port offset");
2001 /* Only in case max split is being done, the local port and
2002 * base port may differ.
2004 base_port
= count
== max_width
?
2005 mlxsw_sp_cluster_base_port_get(local_port
, max_width
) :
2008 for (i
= 0; i
< count
* offset
; i
++) {
2009 /* Expect base port to exist and also the one in the middle in
2010 * case of maximal split count.
2012 if (i
== 0 || (count
== max_width
&& i
== count
/ 2))
2015 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
)) {
2016 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
2017 NL_SET_ERR_MSG_MOD(extack
, "Invalid split configuration");
2022 port_mapping
= mlxsw_sp_port
->mapping
;
2024 for (i
= 0; i
< count
; i
++)
2025 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
2026 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
2028 err
= mlxsw_sp_port_split_create(mlxsw_sp
, base_port
, &port_mapping
,
2031 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create split ports\n");
2032 goto err_port_split_create
;
2037 err_port_split_create
:
2038 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
, offset
);
2042 static int mlxsw_sp_port_unsplit(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
2043 struct netlink_ext_ack
*extack
)
2045 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2046 struct mlxsw_sp_port
*mlxsw_sp_port
;
2053 mlxsw_sp_port
= mlxsw_sp_port_get_by_local_port(mlxsw_sp
, local_port
);
2054 if (!mlxsw_sp_port
) {
2055 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
2057 NL_SET_ERR_MSG_MOD(extack
, "Port number does not exist");
2061 if (!mlxsw_sp_port
->split
) {
2062 netdev_err(mlxsw_sp_port
->dev
, "Port was not split\n");
2063 NL_SET_ERR_MSG_MOD(extack
, "Port was not split");
2067 max_width
= mlxsw_core_module_max_width(mlxsw_core
,
2068 mlxsw_sp_port
->mapping
.module
);
2069 if (max_width
< 0) {
2070 netdev_err(mlxsw_sp_port
->dev
, "Cannot get max width of port module\n");
2071 NL_SET_ERR_MSG_MOD(extack
, "Cannot get max width of port module");
2075 count
= max_width
/ mlxsw_sp_port
->mapping
.width
;
2077 offset
= mlxsw_sp_local_ports_offset(mlxsw_core
, count
, max_width
);
2078 if (WARN_ON(offset
< 0)) {
2079 netdev_err(mlxsw_sp_port
->dev
, "Cannot obtain local port offset\n");
2080 NL_SET_ERR_MSG_MOD(extack
, "Cannot obtain local port offset");
2084 base_port
= mlxsw_sp_port
->split_base_local_port
;
2086 for (i
= 0; i
< count
; i
++)
2087 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
2088 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
2090 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
, offset
);
2096 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port
*mlxsw_sp_port
)
2100 for (i
= 0; i
< TC_MAX_QUEUE
; i
++)
2101 mlxsw_sp_port
->periodic_hw_stats
.xstats
.backlog
[i
] = 0;
2104 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info
*reg
,
2105 char *pude_pl
, void *priv
)
2107 struct mlxsw_sp
*mlxsw_sp
= priv
;
2108 struct mlxsw_sp_port
*mlxsw_sp_port
;
2109 enum mlxsw_reg_pude_oper_status status
;
2112 local_port
= mlxsw_reg_pude_local_port_get(pude_pl
);
2113 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2117 status
= mlxsw_reg_pude_oper_status_get(pude_pl
);
2118 if (status
== MLXSW_PORT_OPER_STATUS_UP
) {
2119 netdev_info(mlxsw_sp_port
->dev
, "link up\n");
2120 netif_carrier_on(mlxsw_sp_port
->dev
);
2121 mlxsw_core_schedule_dw(&mlxsw_sp_port
->ptp
.shaper_dw
, 0);
2123 netdev_info(mlxsw_sp_port
->dev
, "link down\n");
2124 netif_carrier_off(mlxsw_sp_port
->dev
);
2125 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port
);
2129 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp
*mlxsw_sp
,
2130 char *mtpptr_pl
, bool ingress
)
2136 local_port
= mlxsw_reg_mtpptr_local_port_get(mtpptr_pl
);
2137 num_rec
= mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl
);
2138 for (i
= 0; i
< num_rec
; i
++) {
2144 mlxsw_reg_mtpptr_unpack(mtpptr_pl
, i
, &message_type
,
2145 &domain_number
, &sequence_id
,
2147 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp
, ingress
, local_port
,
2148 message_type
, domain_number
,
2149 sequence_id
, timestamp
);
2153 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info
*reg
,
2154 char *mtpptr_pl
, void *priv
)
2156 struct mlxsw_sp
*mlxsw_sp
= priv
;
2158 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp
, mtpptr_pl
, true);
2161 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info
*reg
,
2162 char *mtpptr_pl
, void *priv
)
2164 struct mlxsw_sp
*mlxsw_sp
= priv
;
2166 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp
, mtpptr_pl
, false);
2169 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff
*skb
,
2170 u8 local_port
, void *priv
)
2172 struct mlxsw_sp
*mlxsw_sp
= priv
;
2173 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2174 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
2176 if (unlikely(!mlxsw_sp_port
)) {
2177 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: skb received for non-existent port\n",
2182 skb
->dev
= mlxsw_sp_port
->dev
;
2184 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
2185 u64_stats_update_begin(&pcpu_stats
->syncp
);
2186 pcpu_stats
->rx_packets
++;
2187 pcpu_stats
->rx_bytes
+= skb
->len
;
2188 u64_stats_update_end(&pcpu_stats
->syncp
);
2190 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
2191 netif_receive_skb(skb
);
2194 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff
*skb
, u8 local_port
,
2197 skb
->offload_fwd_mark
= 1;
2198 return mlxsw_sp_rx_listener_no_mark_func(skb
, local_port
, priv
);
2201 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff
*skb
,
2202 u8 local_port
, void *priv
)
2204 skb
->offload_l3_fwd_mark
= 1;
2205 skb
->offload_fwd_mark
= 1;
2206 return mlxsw_sp_rx_listener_no_mark_func(skb
, local_port
, priv
);
2209 void mlxsw_sp_ptp_receive(struct mlxsw_sp
*mlxsw_sp
, struct sk_buff
*skb
,
2212 mlxsw_sp
->ptp_ops
->receive(mlxsw_sp
, skb
, local_port
);
2215 void mlxsw_sp_sample_receive(struct mlxsw_sp
*mlxsw_sp
, struct sk_buff
*skb
,
2218 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2219 struct mlxsw_sp_port_sample
*sample
;
2222 if (unlikely(!mlxsw_sp_port
)) {
2223 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: sample skb received for non-existent port\n",
2229 sample
= rcu_dereference(mlxsw_sp_port
->sample
);
2232 size
= sample
->truncate
? sample
->trunc_size
: skb
->len
;
2233 psample_sample_packet(sample
->psample_group
, skb
, size
,
2234 mlxsw_sp_port
->dev
->ifindex
, 0, sample
->rate
);
2241 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2242 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2243 _is_ctrl, SP_##_trap_group, DISCARD)
2245 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2246 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2247 _is_ctrl, SP_##_trap_group, DISCARD)
2249 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2250 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
2251 _is_ctrl, SP_##_trap_group, DISCARD)
2253 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2254 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2256 static const struct mlxsw_listener mlxsw_sp_listener
[] = {
2258 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func
, PUDE
),
2260 MLXSW_SP_RXL_NO_MARK(FID_MISS
, TRAP_TO_CPU
, FID_MISS
, false),
2262 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS
, TRAP_TO_CPU
, ROUTER_EXP
,
2264 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC
, TRAP_TO_CPU
, ROUTER_EXP
, false),
2265 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST
, TRAP_TO_CPU
, ROUTER_EXP
,
2267 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E
, FORWARD
,
2269 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC
, FORWARD
,
2271 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP
, FORWARD
,
2273 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL
, FORWARD
,
2275 /* Multicast Router Traps */
2276 MLXSW_SP_RXL_MARK(ACL1
, TRAP_TO_CPU
, MULTICAST
, false),
2277 MLXSW_SP_RXL_L3_MARK(ACL2
, TRAP_TO_CPU
, MULTICAST
, false),
2279 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP
, TRAP_TO_CPU
, NEIGH_DISCOVERY
, false),
2282 static const struct mlxsw_listener mlxsw_sp1_listener
[] = {
2284 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func
, PTP_EGR_FIFO
, SP_PTP0
),
2285 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func
, PTP_ING_FIFO
, SP_PTP0
),
2288 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core
*mlxsw_core
)
2290 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2291 char qpcr_pl
[MLXSW_REG_QPCR_LEN
];
2292 enum mlxsw_reg_qpcr_ir_units ir_units
;
2293 int max_cpu_policers
;
2299 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_CPU_POLICERS
))
2302 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
2304 ir_units
= MLXSW_REG_QPCR_IR_UNITS_M
;
2305 for (i
= 0; i
< max_cpu_policers
; i
++) {
2308 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
2309 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
2310 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS
:
2318 __set_bit(i
, mlxsw_sp
->trap
->policers_usage
);
2319 mlxsw_reg_qpcr_pack(qpcr_pl
, i
, ir_units
, is_bytes
, rate
,
2321 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(qpcr
), qpcr_pl
);
2329 static int mlxsw_sp_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
2331 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
2332 enum mlxsw_reg_htgt_trap_group i
;
2333 int max_cpu_policers
;
2334 int max_trap_groups
;
2339 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_TRAP_GROUPS
))
2342 max_trap_groups
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_TRAP_GROUPS
);
2343 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
2345 for (i
= 0; i
< max_trap_groups
; i
++) {
2348 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
2349 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
2350 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS
:
2354 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT
:
2355 priority
= MLXSW_REG_HTGT_DEFAULT_PRIORITY
;
2356 tc
= MLXSW_REG_HTGT_DEFAULT_TC
;
2357 policer_id
= MLXSW_REG_HTGT_INVALID_POLICER
;
2363 if (max_cpu_policers
<= policer_id
&&
2364 policer_id
!= MLXSW_REG_HTGT_INVALID_POLICER
)
2367 mlxsw_reg_htgt_pack(htgt_pl
, i
, policer_id
, priority
, tc
);
2368 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2376 static int mlxsw_sp_traps_register(struct mlxsw_sp
*mlxsw_sp
,
2377 const struct mlxsw_listener listeners
[],
2378 size_t listeners_count
)
2383 for (i
= 0; i
< listeners_count
; i
++) {
2384 err
= mlxsw_core_trap_register(mlxsw_sp
->core
,
2388 goto err_listener_register
;
2393 err_listener_register
:
2394 for (i
--; i
>= 0; i
--) {
2395 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
2402 static void mlxsw_sp_traps_unregister(struct mlxsw_sp
*mlxsw_sp
,
2403 const struct mlxsw_listener listeners
[],
2404 size_t listeners_count
)
2408 for (i
= 0; i
< listeners_count
; i
++) {
2409 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
2415 static int mlxsw_sp_traps_init(struct mlxsw_sp
*mlxsw_sp
)
2417 struct mlxsw_sp_trap
*trap
;
2421 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_CPU_POLICERS
))
2423 max_policers
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_CPU_POLICERS
);
2424 trap
= kzalloc(struct_size(trap
, policers_usage
,
2425 BITS_TO_LONGS(max_policers
)), GFP_KERNEL
);
2428 trap
->max_policers
= max_policers
;
2429 mlxsw_sp
->trap
= trap
;
2431 err
= mlxsw_sp_cpu_policers_set(mlxsw_sp
->core
);
2433 goto err_cpu_policers_set
;
2435 err
= mlxsw_sp_trap_groups_set(mlxsw_sp
->core
);
2437 goto err_trap_groups_set
;
2439 err
= mlxsw_sp_traps_register(mlxsw_sp
, mlxsw_sp_listener
,
2440 ARRAY_SIZE(mlxsw_sp_listener
));
2442 goto err_traps_register
;
2444 err
= mlxsw_sp_traps_register(mlxsw_sp
, mlxsw_sp
->listeners
,
2445 mlxsw_sp
->listeners_count
);
2447 goto err_extra_traps_init
;
2451 err_extra_traps_init
:
2452 mlxsw_sp_traps_unregister(mlxsw_sp
, mlxsw_sp_listener
,
2453 ARRAY_SIZE(mlxsw_sp_listener
));
2455 err_trap_groups_set
:
2456 err_cpu_policers_set
:
2461 static void mlxsw_sp_traps_fini(struct mlxsw_sp
*mlxsw_sp
)
2463 mlxsw_sp_traps_unregister(mlxsw_sp
, mlxsw_sp
->listeners
,
2464 mlxsw_sp
->listeners_count
);
2465 mlxsw_sp_traps_unregister(mlxsw_sp
, mlxsw_sp_listener
,
2466 ARRAY_SIZE(mlxsw_sp_listener
));
2467 kfree(mlxsw_sp
->trap
);
2470 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2472 static int mlxsw_sp_lag_init(struct mlxsw_sp
*mlxsw_sp
)
2474 char slcr_pl
[MLXSW_REG_SLCR_LEN
];
2478 seed
= jhash(mlxsw_sp
->base_mac
, sizeof(mlxsw_sp
->base_mac
),
2479 MLXSW_SP_LAG_SEED_INIT
);
2480 mlxsw_reg_slcr_pack(slcr_pl
, MLXSW_REG_SLCR_LAG_HASH_SMAC
|
2481 MLXSW_REG_SLCR_LAG_HASH_DMAC
|
2482 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE
|
2483 MLXSW_REG_SLCR_LAG_HASH_VLANID
|
2484 MLXSW_REG_SLCR_LAG_HASH_SIP
|
2485 MLXSW_REG_SLCR_LAG_HASH_DIP
|
2486 MLXSW_REG_SLCR_LAG_HASH_SPORT
|
2487 MLXSW_REG_SLCR_LAG_HASH_DPORT
|
2488 MLXSW_REG_SLCR_LAG_HASH_IPPROTO
, seed
);
2489 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcr
), slcr_pl
);
2493 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG
) ||
2494 !MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG_MEMBERS
))
2497 mlxsw_sp
->lags
= kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
),
2498 sizeof(struct mlxsw_sp_upper
),
2500 if (!mlxsw_sp
->lags
)
2506 static void mlxsw_sp_lag_fini(struct mlxsw_sp
*mlxsw_sp
)
2508 kfree(mlxsw_sp
->lags
);
2511 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
2513 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
2516 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_EMAD
,
2517 MLXSW_REG_HTGT_INVALID_POLICER
,
2518 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
2519 MLXSW_REG_HTGT_DEFAULT_TC
);
2520 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2524 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_MFDE
,
2525 MLXSW_REG_HTGT_INVALID_POLICER
,
2526 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
2527 MLXSW_REG_HTGT_DEFAULT_TC
);
2528 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2532 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_MTWE
,
2533 MLXSW_REG_HTGT_INVALID_POLICER
,
2534 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
2535 MLXSW_REG_HTGT_DEFAULT_TC
);
2536 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2540 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_PMPE
,
2541 MLXSW_REG_HTGT_INVALID_POLICER
,
2542 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
2543 MLXSW_REG_HTGT_DEFAULT_TC
);
2544 return mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2547 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops
= {
2548 .clock_init
= mlxsw_sp1_ptp_clock_init
,
2549 .clock_fini
= mlxsw_sp1_ptp_clock_fini
,
2550 .init
= mlxsw_sp1_ptp_init
,
2551 .fini
= mlxsw_sp1_ptp_fini
,
2552 .receive
= mlxsw_sp1_ptp_receive
,
2553 .transmitted
= mlxsw_sp1_ptp_transmitted
,
2554 .hwtstamp_get
= mlxsw_sp1_ptp_hwtstamp_get
,
2555 .hwtstamp_set
= mlxsw_sp1_ptp_hwtstamp_set
,
2556 .shaper_work
= mlxsw_sp1_ptp_shaper_work
,
2557 .get_ts_info
= mlxsw_sp1_ptp_get_ts_info
,
2558 .get_stats_count
= mlxsw_sp1_get_stats_count
,
2559 .get_stats_strings
= mlxsw_sp1_get_stats_strings
,
2560 .get_stats
= mlxsw_sp1_get_stats
,
2563 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops
= {
2564 .clock_init
= mlxsw_sp2_ptp_clock_init
,
2565 .clock_fini
= mlxsw_sp2_ptp_clock_fini
,
2566 .init
= mlxsw_sp2_ptp_init
,
2567 .fini
= mlxsw_sp2_ptp_fini
,
2568 .receive
= mlxsw_sp2_ptp_receive
,
2569 .transmitted
= mlxsw_sp2_ptp_transmitted
,
2570 .hwtstamp_get
= mlxsw_sp2_ptp_hwtstamp_get
,
2571 .hwtstamp_set
= mlxsw_sp2_ptp_hwtstamp_set
,
2572 .shaper_work
= mlxsw_sp2_ptp_shaper_work
,
2573 .get_ts_info
= mlxsw_sp2_ptp_get_ts_info
,
2574 .get_stats_count
= mlxsw_sp2_get_stats_count
,
2575 .get_stats_strings
= mlxsw_sp2_get_stats_strings
,
2576 .get_stats
= mlxsw_sp2_get_stats
,
2579 static int mlxsw_sp_netdevice_event(struct notifier_block
*unused
,
2580 unsigned long event
, void *ptr
);
2582 static int mlxsw_sp_init(struct mlxsw_core
*mlxsw_core
,
2583 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2584 struct netlink_ext_ack
*extack
)
2586 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2589 mlxsw_sp
->core
= mlxsw_core
;
2590 mlxsw_sp
->bus_info
= mlxsw_bus_info
;
2592 mlxsw_core_emad_string_tlv_enable(mlxsw_core
);
2594 err
= mlxsw_sp_base_mac_get(mlxsw_sp
);
2596 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to get base mac\n");
2600 err
= mlxsw_sp_kvdl_init(mlxsw_sp
);
2602 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize KVDL\n");
2606 err
= mlxsw_sp_fids_init(mlxsw_sp
);
2608 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize FIDs\n");
2612 err
= mlxsw_sp_policers_init(mlxsw_sp
);
2614 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize policers\n");
2615 goto err_policers_init
;
2618 err
= mlxsw_sp_traps_init(mlxsw_sp
);
2620 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set traps\n");
2621 goto err_traps_init
;
2624 err
= mlxsw_sp_devlink_traps_init(mlxsw_sp
);
2626 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize devlink traps\n");
2627 goto err_devlink_traps_init
;
2630 err
= mlxsw_sp_buffers_init(mlxsw_sp
);
2632 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize buffers\n");
2633 goto err_buffers_init
;
2636 err
= mlxsw_sp_lag_init(mlxsw_sp
);
2638 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize LAG\n");
2642 /* Initialize SPAN before router and switchdev, so that those components
2643 * can call mlxsw_sp_span_respin().
2645 err
= mlxsw_sp_span_init(mlxsw_sp
);
2647 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init span system\n");
2651 err
= mlxsw_sp_switchdev_init(mlxsw_sp
);
2653 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize switchdev\n");
2654 goto err_switchdev_init
;
2657 err
= mlxsw_sp_counter_pool_init(mlxsw_sp
);
2659 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init counter pool\n");
2660 goto err_counter_pool_init
;
2663 err
= mlxsw_sp_afa_init(mlxsw_sp
);
2665 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL actions\n");
2669 err
= mlxsw_sp_nve_init(mlxsw_sp
);
2671 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize NVE\n");
2675 err
= mlxsw_sp_acl_init(mlxsw_sp
);
2677 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL\n");
2681 err
= mlxsw_sp_router_init(mlxsw_sp
, extack
);
2683 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize router\n");
2684 goto err_router_init
;
2687 if (mlxsw_sp
->bus_info
->read_frc_capable
) {
2688 /* NULL is a valid return value from clock_init */
2690 mlxsw_sp
->ptp_ops
->clock_init(mlxsw_sp
,
2691 mlxsw_sp
->bus_info
->dev
);
2692 if (IS_ERR(mlxsw_sp
->clock
)) {
2693 err
= PTR_ERR(mlxsw_sp
->clock
);
2694 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init ptp clock\n");
2695 goto err_ptp_clock_init
;
2699 if (mlxsw_sp
->clock
) {
2700 /* NULL is a valid return value from ptp_ops->init */
2701 mlxsw_sp
->ptp_state
= mlxsw_sp
->ptp_ops
->init(mlxsw_sp
);
2702 if (IS_ERR(mlxsw_sp
->ptp_state
)) {
2703 err
= PTR_ERR(mlxsw_sp
->ptp_state
);
2704 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize PTP\n");
2709 /* Initialize netdevice notifier after router and SPAN is initialized,
2710 * so that the event handler can use router structures and call SPAN
2713 mlxsw_sp
->netdevice_nb
.notifier_call
= mlxsw_sp_netdevice_event
;
2714 err
= register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp
),
2715 &mlxsw_sp
->netdevice_nb
);
2717 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to register netdev notifier\n");
2718 goto err_netdev_notifier
;
2721 err
= mlxsw_sp_dpipe_init(mlxsw_sp
);
2723 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init pipeline debug\n");
2724 goto err_dpipe_init
;
2727 err
= mlxsw_sp_port_module_info_init(mlxsw_sp
);
2729 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init port module info\n");
2730 goto err_port_module_info_init
;
2733 err
= mlxsw_sp_ports_create(mlxsw_sp
);
2735 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create ports\n");
2736 goto err_ports_create
;
2742 mlxsw_sp_port_module_info_fini(mlxsw_sp
);
2743 err_port_module_info_init
:
2744 mlxsw_sp_dpipe_fini(mlxsw_sp
);
2746 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp
),
2747 &mlxsw_sp
->netdevice_nb
);
2748 err_netdev_notifier
:
2749 if (mlxsw_sp
->clock
)
2750 mlxsw_sp
->ptp_ops
->fini(mlxsw_sp
->ptp_state
);
2752 if (mlxsw_sp
->clock
)
2753 mlxsw_sp
->ptp_ops
->clock_fini(mlxsw_sp
->clock
);
2755 mlxsw_sp_router_fini(mlxsw_sp
);
2757 mlxsw_sp_acl_fini(mlxsw_sp
);
2759 mlxsw_sp_nve_fini(mlxsw_sp
);
2761 mlxsw_sp_afa_fini(mlxsw_sp
);
2763 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
2764 err_counter_pool_init
:
2765 mlxsw_sp_switchdev_fini(mlxsw_sp
);
2767 mlxsw_sp_span_fini(mlxsw_sp
);
2769 mlxsw_sp_lag_fini(mlxsw_sp
);
2771 mlxsw_sp_buffers_fini(mlxsw_sp
);
2773 mlxsw_sp_devlink_traps_fini(mlxsw_sp
);
2774 err_devlink_traps_init
:
2775 mlxsw_sp_traps_fini(mlxsw_sp
);
2777 mlxsw_sp_policers_fini(mlxsw_sp
);
2779 mlxsw_sp_fids_fini(mlxsw_sp
);
2781 mlxsw_sp_kvdl_fini(mlxsw_sp
);
2785 static int mlxsw_sp1_init(struct mlxsw_core
*mlxsw_core
,
2786 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2787 struct netlink_ext_ack
*extack
)
2789 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2791 mlxsw_sp
->kvdl_ops
= &mlxsw_sp1_kvdl_ops
;
2792 mlxsw_sp
->afa_ops
= &mlxsw_sp1_act_afa_ops
;
2793 mlxsw_sp
->afk_ops
= &mlxsw_sp1_afk_ops
;
2794 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp1_mr_tcam_ops
;
2795 mlxsw_sp
->acl_rulei_ops
= &mlxsw_sp1_acl_rulei_ops
;
2796 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp1_acl_tcam_ops
;
2797 mlxsw_sp
->nve_ops_arr
= mlxsw_sp1_nve_ops_arr
;
2798 mlxsw_sp
->mac_mask
= mlxsw_sp1_mac_mask
;
2799 mlxsw_sp
->rif_ops_arr
= mlxsw_sp1_rif_ops_arr
;
2800 mlxsw_sp
->sb_vals
= &mlxsw_sp1_sb_vals
;
2801 mlxsw_sp
->sb_ops
= &mlxsw_sp1_sb_ops
;
2802 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp1_port_type_speed_ops
;
2803 mlxsw_sp
->ptp_ops
= &mlxsw_sp1_ptp_ops
;
2804 mlxsw_sp
->span_ops
= &mlxsw_sp1_span_ops
;
2805 mlxsw_sp
->policer_core_ops
= &mlxsw_sp1_policer_core_ops
;
2806 mlxsw_sp
->trap_ops
= &mlxsw_sp1_trap_ops
;
2807 mlxsw_sp
->listeners
= mlxsw_sp1_listener
;
2808 mlxsw_sp
->listeners_count
= ARRAY_SIZE(mlxsw_sp1_listener
);
2809 mlxsw_sp
->lowest_shaper_bs
= MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1
;
2811 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
, extack
);
2814 static int mlxsw_sp2_init(struct mlxsw_core
*mlxsw_core
,
2815 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2816 struct netlink_ext_ack
*extack
)
2818 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2820 mlxsw_sp
->kvdl_ops
= &mlxsw_sp2_kvdl_ops
;
2821 mlxsw_sp
->afa_ops
= &mlxsw_sp2_act_afa_ops
;
2822 mlxsw_sp
->afk_ops
= &mlxsw_sp2_afk_ops
;
2823 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp2_mr_tcam_ops
;
2824 mlxsw_sp
->acl_rulei_ops
= &mlxsw_sp2_acl_rulei_ops
;
2825 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp2_acl_tcam_ops
;
2826 mlxsw_sp
->nve_ops_arr
= mlxsw_sp2_nve_ops_arr
;
2827 mlxsw_sp
->mac_mask
= mlxsw_sp2_mac_mask
;
2828 mlxsw_sp
->rif_ops_arr
= mlxsw_sp2_rif_ops_arr
;
2829 mlxsw_sp
->sb_vals
= &mlxsw_sp2_sb_vals
;
2830 mlxsw_sp
->sb_ops
= &mlxsw_sp2_sb_ops
;
2831 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp2_port_type_speed_ops
;
2832 mlxsw_sp
->ptp_ops
= &mlxsw_sp2_ptp_ops
;
2833 mlxsw_sp
->span_ops
= &mlxsw_sp2_span_ops
;
2834 mlxsw_sp
->policer_core_ops
= &mlxsw_sp2_policer_core_ops
;
2835 mlxsw_sp
->trap_ops
= &mlxsw_sp2_trap_ops
;
2836 mlxsw_sp
->lowest_shaper_bs
= MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2
;
2838 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
, extack
);
2841 static int mlxsw_sp3_init(struct mlxsw_core
*mlxsw_core
,
2842 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2843 struct netlink_ext_ack
*extack
)
2845 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2847 mlxsw_sp
->kvdl_ops
= &mlxsw_sp2_kvdl_ops
;
2848 mlxsw_sp
->afa_ops
= &mlxsw_sp2_act_afa_ops
;
2849 mlxsw_sp
->afk_ops
= &mlxsw_sp2_afk_ops
;
2850 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp2_mr_tcam_ops
;
2851 mlxsw_sp
->acl_rulei_ops
= &mlxsw_sp2_acl_rulei_ops
;
2852 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp2_acl_tcam_ops
;
2853 mlxsw_sp
->nve_ops_arr
= mlxsw_sp2_nve_ops_arr
;
2854 mlxsw_sp
->mac_mask
= mlxsw_sp2_mac_mask
;
2855 mlxsw_sp
->rif_ops_arr
= mlxsw_sp2_rif_ops_arr
;
2856 mlxsw_sp
->sb_vals
= &mlxsw_sp2_sb_vals
;
2857 mlxsw_sp
->sb_ops
= &mlxsw_sp3_sb_ops
;
2858 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp2_port_type_speed_ops
;
2859 mlxsw_sp
->ptp_ops
= &mlxsw_sp2_ptp_ops
;
2860 mlxsw_sp
->span_ops
= &mlxsw_sp3_span_ops
;
2861 mlxsw_sp
->policer_core_ops
= &mlxsw_sp2_policer_core_ops
;
2862 mlxsw_sp
->trap_ops
= &mlxsw_sp2_trap_ops
;
2863 mlxsw_sp
->lowest_shaper_bs
= MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3
;
2865 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
, extack
);
2868 static void mlxsw_sp_fini(struct mlxsw_core
*mlxsw_core
)
2870 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2872 mlxsw_sp_ports_remove(mlxsw_sp
);
2873 mlxsw_sp_port_module_info_fini(mlxsw_sp
);
2874 mlxsw_sp_dpipe_fini(mlxsw_sp
);
2875 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp
),
2876 &mlxsw_sp
->netdevice_nb
);
2877 if (mlxsw_sp
->clock
) {
2878 mlxsw_sp
->ptp_ops
->fini(mlxsw_sp
->ptp_state
);
2879 mlxsw_sp
->ptp_ops
->clock_fini(mlxsw_sp
->clock
);
2881 mlxsw_sp_router_fini(mlxsw_sp
);
2882 mlxsw_sp_acl_fini(mlxsw_sp
);
2883 mlxsw_sp_nve_fini(mlxsw_sp
);
2884 mlxsw_sp_afa_fini(mlxsw_sp
);
2885 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
2886 mlxsw_sp_switchdev_fini(mlxsw_sp
);
2887 mlxsw_sp_span_fini(mlxsw_sp
);
2888 mlxsw_sp_lag_fini(mlxsw_sp
);
2889 mlxsw_sp_buffers_fini(mlxsw_sp
);
2890 mlxsw_sp_devlink_traps_fini(mlxsw_sp
);
2891 mlxsw_sp_traps_fini(mlxsw_sp
);
2892 mlxsw_sp_policers_fini(mlxsw_sp
);
2893 mlxsw_sp_fids_fini(mlxsw_sp
);
2894 mlxsw_sp_kvdl_fini(mlxsw_sp
);
2897 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
2900 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
2903 static const struct mlxsw_config_profile mlxsw_sp1_config_profile
= {
2905 .max_mid
= MLXSW_SP_MID_MAX
,
2906 .used_flood_tables
= 1,
2907 .used_flood_mode
= 1,
2909 .max_fid_flood_tables
= 3,
2910 .fid_flood_table_size
= MLXSW_SP_FID_FLOOD_TABLE_SIZE
,
2911 .used_max_ib_mc
= 1,
2915 .used_kvd_sizes
= 1,
2916 .kvd_hash_single_parts
= 59,
2917 .kvd_hash_double_parts
= 41,
2918 .kvd_linear_size
= MLXSW_SP_KVD_LINEAR_SIZE
,
2922 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
2927 static const struct mlxsw_config_profile mlxsw_sp2_config_profile
= {
2929 .max_mid
= MLXSW_SP_MID_MAX
,
2930 .used_flood_tables
= 1,
2931 .used_flood_mode
= 1,
2933 .max_fid_flood_tables
= 3,
2934 .fid_flood_table_size
= MLXSW_SP_FID_FLOOD_TABLE_SIZE
,
2935 .used_max_ib_mc
= 1,
2939 .used_kvh_xlt_cache_mode
= 1,
2940 .kvh_xlt_cache_mode
= 1,
2944 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
2950 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core
*mlxsw_core
,
2951 struct devlink_resource_size_params
*kvd_size_params
,
2952 struct devlink_resource_size_params
*linear_size_params
,
2953 struct devlink_resource_size_params
*hash_double_size_params
,
2954 struct devlink_resource_size_params
*hash_single_size_params
)
2956 u32 single_size_min
= MLXSW_CORE_RES_GET(mlxsw_core
,
2957 KVD_SINGLE_MIN_SIZE
);
2958 u32 double_size_min
= MLXSW_CORE_RES_GET(mlxsw_core
,
2959 KVD_DOUBLE_MIN_SIZE
);
2960 u32 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
2961 u32 linear_size_min
= 0;
2963 devlink_resource_size_params_init(kvd_size_params
, kvd_size
, kvd_size
,
2964 MLXSW_SP_KVD_GRANULARITY
,
2965 DEVLINK_RESOURCE_UNIT_ENTRY
);
2966 devlink_resource_size_params_init(linear_size_params
, linear_size_min
,
2967 kvd_size
- single_size_min
-
2969 MLXSW_SP_KVD_GRANULARITY
,
2970 DEVLINK_RESOURCE_UNIT_ENTRY
);
2971 devlink_resource_size_params_init(hash_double_size_params
,
2973 kvd_size
- single_size_min
-
2975 MLXSW_SP_KVD_GRANULARITY
,
2976 DEVLINK_RESOURCE_UNIT_ENTRY
);
2977 devlink_resource_size_params_init(hash_single_size_params
,
2979 kvd_size
- double_size_min
-
2981 MLXSW_SP_KVD_GRANULARITY
,
2982 DEVLINK_RESOURCE_UNIT_ENTRY
);
2985 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core
*mlxsw_core
)
2987 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
2988 struct devlink_resource_size_params hash_single_size_params
;
2989 struct devlink_resource_size_params hash_double_size_params
;
2990 struct devlink_resource_size_params linear_size_params
;
2991 struct devlink_resource_size_params kvd_size_params
;
2992 u32 kvd_size
, single_size
, double_size
, linear_size
;
2993 const struct mlxsw_config_profile
*profile
;
2996 profile
= &mlxsw_sp1_config_profile
;
2997 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SIZE
))
3000 mlxsw_sp_resource_size_params_prepare(mlxsw_core
, &kvd_size_params
,
3001 &linear_size_params
,
3002 &hash_double_size_params
,
3003 &hash_single_size_params
);
3005 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
3006 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD
,
3007 kvd_size
, MLXSW_SP_RESOURCE_KVD
,
3008 DEVLINK_RESOURCE_ID_PARENT_TOP
,
3013 linear_size
= profile
->kvd_linear_size
;
3014 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR
,
3016 MLXSW_SP_RESOURCE_KVD_LINEAR
,
3017 MLXSW_SP_RESOURCE_KVD
,
3018 &linear_size_params
);
3022 err
= mlxsw_sp1_kvdl_resources_register(mlxsw_core
);
3026 double_size
= kvd_size
- linear_size
;
3027 double_size
*= profile
->kvd_hash_double_parts
;
3028 double_size
/= profile
->kvd_hash_double_parts
+
3029 profile
->kvd_hash_single_parts
;
3030 double_size
= rounddown(double_size
, MLXSW_SP_KVD_GRANULARITY
);
3031 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE
,
3033 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE
,
3034 MLXSW_SP_RESOURCE_KVD
,
3035 &hash_double_size_params
);
3039 single_size
= kvd_size
- double_size
- linear_size
;
3040 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE
,
3042 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE
,
3043 MLXSW_SP_RESOURCE_KVD
,
3044 &hash_single_size_params
);
3051 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core
*mlxsw_core
)
3053 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3054 struct devlink_resource_size_params kvd_size_params
;
3057 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SIZE
))
3060 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
3061 devlink_resource_size_params_init(&kvd_size_params
, kvd_size
, kvd_size
,
3062 MLXSW_SP_KVD_GRANULARITY
,
3063 DEVLINK_RESOURCE_UNIT_ENTRY
);
3065 return devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD
,
3066 kvd_size
, MLXSW_SP_RESOURCE_KVD
,
3067 DEVLINK_RESOURCE_ID_PARENT_TOP
,
3071 static int mlxsw_sp_resources_span_register(struct mlxsw_core
*mlxsw_core
)
3073 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3074 struct devlink_resource_size_params span_size_params
;
3077 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_SPAN
))
3080 max_span
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_SPAN
);
3081 devlink_resource_size_params_init(&span_size_params
, max_span
, max_span
,
3082 1, DEVLINK_RESOURCE_UNIT_ENTRY
);
3084 return devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_SPAN
,
3085 max_span
, MLXSW_SP_RESOURCE_SPAN
,
3086 DEVLINK_RESOURCE_ID_PARENT_TOP
,
3090 static int mlxsw_sp1_resources_register(struct mlxsw_core
*mlxsw_core
)
3094 err
= mlxsw_sp1_resources_kvd_register(mlxsw_core
);
3098 err
= mlxsw_sp_resources_span_register(mlxsw_core
);
3100 goto err_resources_span_register
;
3102 err
= mlxsw_sp_counter_resources_register(mlxsw_core
);
3104 goto err_resources_counter_register
;
3106 err
= mlxsw_sp_policer_resources_register(mlxsw_core
);
3108 goto err_resources_counter_register
;
3112 err_resources_counter_register
:
3113 err_resources_span_register
:
3114 devlink_resources_unregister(priv_to_devlink(mlxsw_core
), NULL
);
3118 static int mlxsw_sp2_resources_register(struct mlxsw_core
*mlxsw_core
)
3122 err
= mlxsw_sp2_resources_kvd_register(mlxsw_core
);
3126 err
= mlxsw_sp_resources_span_register(mlxsw_core
);
3128 goto err_resources_span_register
;
3130 err
= mlxsw_sp_counter_resources_register(mlxsw_core
);
3132 goto err_resources_counter_register
;
3134 err
= mlxsw_sp_policer_resources_register(mlxsw_core
);
3136 goto err_resources_counter_register
;
3140 err_resources_counter_register
:
3141 err_resources_span_register
:
3142 devlink_resources_unregister(priv_to_devlink(mlxsw_core
), NULL
);
3146 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core
*mlxsw_core
,
3147 const struct mlxsw_config_profile
*profile
,
3148 u64
*p_single_size
, u64
*p_double_size
,
3151 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3155 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SINGLE_MIN_SIZE
) ||
3156 !MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_DOUBLE_MIN_SIZE
))
3159 /* The hash part is what left of the kvd without the
3160 * linear part. It is split to the single size and
3161 * double size by the parts ratio from the profile.
3162 * Both sizes must be a multiplications of the
3163 * granularity from the profile. In case the user
3164 * provided the sizes they are obtained via devlink.
3166 err
= devlink_resource_size_get(devlink
,
3167 MLXSW_SP_RESOURCE_KVD_LINEAR
,
3170 *p_linear_size
= profile
->kvd_linear_size
;
3172 err
= devlink_resource_size_get(devlink
,
3173 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE
,
3176 double_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) -
3178 double_size
*= profile
->kvd_hash_double_parts
;
3179 double_size
/= profile
->kvd_hash_double_parts
+
3180 profile
->kvd_hash_single_parts
;
3181 *p_double_size
= rounddown(double_size
,
3182 MLXSW_SP_KVD_GRANULARITY
);
3185 err
= devlink_resource_size_get(devlink
,
3186 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE
,
3189 *p_single_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) -
3190 *p_double_size
- *p_linear_size
;
3192 /* Check results are legal. */
3193 if (*p_single_size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SINGLE_MIN_SIZE
) ||
3194 *p_double_size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_DOUBLE_MIN_SIZE
) ||
3195 MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) < *p_linear_size
)
3202 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink
*devlink
, u32 id
,
3203 struct devlink_param_gset_ctx
*ctx
)
3205 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
3206 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3208 ctx
->val
.vu32
= mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp
);
3213 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink
*devlink
, u32 id
,
3214 struct devlink_param_gset_ctx
*ctx
)
3216 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
3217 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3219 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp
, ctx
->val
.vu32
);
3222 static const struct devlink_param mlxsw_sp2_devlink_params
[] = {
3223 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL
,
3224 "acl_region_rehash_interval",
3225 DEVLINK_PARAM_TYPE_U32
,
3226 BIT(DEVLINK_PARAM_CMODE_RUNTIME
),
3227 mlxsw_sp_params_acl_region_rehash_intrvl_get
,
3228 mlxsw_sp_params_acl_region_rehash_intrvl_set
,
3232 static int mlxsw_sp2_params_register(struct mlxsw_core
*mlxsw_core
)
3234 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3235 union devlink_param_value value
;
3238 err
= devlink_params_register(devlink
, mlxsw_sp2_devlink_params
,
3239 ARRAY_SIZE(mlxsw_sp2_devlink_params
));
3244 devlink_param_driverinit_value_set(devlink
,
3245 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL
,
3250 static void mlxsw_sp2_params_unregister(struct mlxsw_core
*mlxsw_core
)
3252 devlink_params_unregister(priv_to_devlink(mlxsw_core
),
3253 mlxsw_sp2_devlink_params
,
3254 ARRAY_SIZE(mlxsw_sp2_devlink_params
));
3257 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core
*mlxsw_core
,
3258 struct sk_buff
*skb
, u8 local_port
)
3260 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3262 skb_pull(skb
, MLXSW_TXHDR_LEN
);
3263 mlxsw_sp
->ptp_ops
->transmitted(mlxsw_sp
, skb
, local_port
);
3266 static struct mlxsw_driver mlxsw_sp1_driver
= {
3267 .kind
= mlxsw_sp1_driver_name
,
3268 .priv_size
= sizeof(struct mlxsw_sp
),
3269 .fw_req_rev
= &mlxsw_sp1_fw_rev
,
3270 .fw_filename
= MLXSW_SP1_FW_FILENAME
,
3271 .init
= mlxsw_sp1_init
,
3272 .fini
= mlxsw_sp_fini
,
3273 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
3274 .port_split
= mlxsw_sp_port_split
,
3275 .port_unsplit
= mlxsw_sp_port_unsplit
,
3276 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
3277 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
3278 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
3279 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
3280 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
3281 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
3282 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
3283 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
3284 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
3285 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
3286 .trap_init
= mlxsw_sp_trap_init
,
3287 .trap_fini
= mlxsw_sp_trap_fini
,
3288 .trap_action_set
= mlxsw_sp_trap_action_set
,
3289 .trap_group_init
= mlxsw_sp_trap_group_init
,
3290 .trap_group_set
= mlxsw_sp_trap_group_set
,
3291 .trap_policer_init
= mlxsw_sp_trap_policer_init
,
3292 .trap_policer_fini
= mlxsw_sp_trap_policer_fini
,
3293 .trap_policer_set
= mlxsw_sp_trap_policer_set
,
3294 .trap_policer_counter_get
= mlxsw_sp_trap_policer_counter_get
,
3295 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
3296 .resources_register
= mlxsw_sp1_resources_register
,
3297 .kvd_sizes_get
= mlxsw_sp_kvd_sizes_get
,
3298 .ptp_transmitted
= mlxsw_sp_ptp_transmitted
,
3299 .txhdr_len
= MLXSW_TXHDR_LEN
,
3300 .profile
= &mlxsw_sp1_config_profile
,
3301 .res_query_enabled
= true,
3302 .fw_fatal_enabled
= true,
3303 .temp_warn_enabled
= true,
3306 static struct mlxsw_driver mlxsw_sp2_driver
= {
3307 .kind
= mlxsw_sp2_driver_name
,
3308 .priv_size
= sizeof(struct mlxsw_sp
),
3309 .fw_req_rev
= &mlxsw_sp2_fw_rev
,
3310 .fw_filename
= MLXSW_SP2_FW_FILENAME
,
3311 .init
= mlxsw_sp2_init
,
3312 .fini
= mlxsw_sp_fini
,
3313 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
3314 .port_split
= mlxsw_sp_port_split
,
3315 .port_unsplit
= mlxsw_sp_port_unsplit
,
3316 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
3317 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
3318 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
3319 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
3320 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
3321 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
3322 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
3323 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
3324 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
3325 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
3326 .trap_init
= mlxsw_sp_trap_init
,
3327 .trap_fini
= mlxsw_sp_trap_fini
,
3328 .trap_action_set
= mlxsw_sp_trap_action_set
,
3329 .trap_group_init
= mlxsw_sp_trap_group_init
,
3330 .trap_group_set
= mlxsw_sp_trap_group_set
,
3331 .trap_policer_init
= mlxsw_sp_trap_policer_init
,
3332 .trap_policer_fini
= mlxsw_sp_trap_policer_fini
,
3333 .trap_policer_set
= mlxsw_sp_trap_policer_set
,
3334 .trap_policer_counter_get
= mlxsw_sp_trap_policer_counter_get
,
3335 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
3336 .resources_register
= mlxsw_sp2_resources_register
,
3337 .params_register
= mlxsw_sp2_params_register
,
3338 .params_unregister
= mlxsw_sp2_params_unregister
,
3339 .ptp_transmitted
= mlxsw_sp_ptp_transmitted
,
3340 .txhdr_len
= MLXSW_TXHDR_LEN
,
3341 .profile
= &mlxsw_sp2_config_profile
,
3342 .res_query_enabled
= true,
3343 .fw_fatal_enabled
= true,
3344 .temp_warn_enabled
= true,
3347 static struct mlxsw_driver mlxsw_sp3_driver
= {
3348 .kind
= mlxsw_sp3_driver_name
,
3349 .priv_size
= sizeof(struct mlxsw_sp
),
3350 .fw_req_rev
= &mlxsw_sp3_fw_rev
,
3351 .fw_filename
= MLXSW_SP3_FW_FILENAME
,
3352 .init
= mlxsw_sp3_init
,
3353 .fini
= mlxsw_sp_fini
,
3354 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
3355 .port_split
= mlxsw_sp_port_split
,
3356 .port_unsplit
= mlxsw_sp_port_unsplit
,
3357 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
3358 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
3359 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
3360 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
3361 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
3362 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
3363 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
3364 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
3365 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
3366 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
3367 .trap_init
= mlxsw_sp_trap_init
,
3368 .trap_fini
= mlxsw_sp_trap_fini
,
3369 .trap_action_set
= mlxsw_sp_trap_action_set
,
3370 .trap_group_init
= mlxsw_sp_trap_group_init
,
3371 .trap_group_set
= mlxsw_sp_trap_group_set
,
3372 .trap_policer_init
= mlxsw_sp_trap_policer_init
,
3373 .trap_policer_fini
= mlxsw_sp_trap_policer_fini
,
3374 .trap_policer_set
= mlxsw_sp_trap_policer_set
,
3375 .trap_policer_counter_get
= mlxsw_sp_trap_policer_counter_get
,
3376 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
3377 .resources_register
= mlxsw_sp2_resources_register
,
3378 .params_register
= mlxsw_sp2_params_register
,
3379 .params_unregister
= mlxsw_sp2_params_unregister
,
3380 .ptp_transmitted
= mlxsw_sp_ptp_transmitted
,
3381 .txhdr_len
= MLXSW_TXHDR_LEN
,
3382 .profile
= &mlxsw_sp2_config_profile
,
3383 .res_query_enabled
= true,
3384 .fw_fatal_enabled
= true,
3385 .temp_warn_enabled
= true,
3388 bool mlxsw_sp_port_dev_check(const struct net_device
*dev
)
3390 return dev
->netdev_ops
== &mlxsw_sp_port_netdev_ops
;
3393 static int mlxsw_sp_lower_dev_walk(struct net_device
*lower_dev
,
3394 struct netdev_nested_priv
*priv
)
3398 if (mlxsw_sp_port_dev_check(lower_dev
)) {
3399 priv
->data
= (void *)netdev_priv(lower_dev
);
3406 struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find(struct net_device
*dev
)
3408 struct netdev_nested_priv priv
= {
3412 if (mlxsw_sp_port_dev_check(dev
))
3413 return netdev_priv(dev
);
3415 netdev_walk_all_lower_dev(dev
, mlxsw_sp_lower_dev_walk
, &priv
);
3417 return (struct mlxsw_sp_port
*)priv
.data
;
3420 struct mlxsw_sp
*mlxsw_sp_lower_get(struct net_device
*dev
)
3422 struct mlxsw_sp_port
*mlxsw_sp_port
;
3424 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find(dev
);
3425 return mlxsw_sp_port
? mlxsw_sp_port
->mlxsw_sp
: NULL
;
3428 struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find_rcu(struct net_device
*dev
)
3430 struct netdev_nested_priv priv
= {
3434 if (mlxsw_sp_port_dev_check(dev
))
3435 return netdev_priv(dev
);
3437 netdev_walk_all_lower_dev_rcu(dev
, mlxsw_sp_lower_dev_walk
,
3440 return (struct mlxsw_sp_port
*)priv
.data
;
3443 struct mlxsw_sp_port
*mlxsw_sp_port_lower_dev_hold(struct net_device
*dev
)
3445 struct mlxsw_sp_port
*mlxsw_sp_port
;
3448 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find_rcu(dev
);
3450 dev_hold(mlxsw_sp_port
->dev
);
3452 return mlxsw_sp_port
;
3455 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port
*mlxsw_sp_port
)
3457 dev_put(mlxsw_sp_port
->dev
);
3461 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port
*mlxsw_sp_port
,
3462 struct net_device
*lag_dev
)
3464 struct net_device
*br_dev
= netdev_master_upper_dev_get(lag_dev
);
3465 struct net_device
*upper_dev
;
3466 struct list_head
*iter
;
3468 if (netif_is_bridge_port(lag_dev
))
3469 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
, lag_dev
, br_dev
);
3471 netdev_for_each_upper_dev_rcu(lag_dev
, upper_dev
, iter
) {
3472 if (!netif_is_bridge_port(upper_dev
))
3474 br_dev
= netdev_master_upper_dev_get(upper_dev
);
3475 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
, upper_dev
, br_dev
);
3479 static int mlxsw_sp_lag_create(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
)
3481 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3483 mlxsw_reg_sldr_lag_create_pack(sldr_pl
, lag_id
);
3484 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3487 static int mlxsw_sp_lag_destroy(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
)
3489 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3491 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl
, lag_id
);
3492 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3495 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
3496 u16 lag_id
, u8 port_index
)
3498 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3499 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3501 mlxsw_reg_slcor_port_add_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3502 lag_id
, port_index
);
3503 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3506 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
3509 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3510 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3512 mlxsw_reg_slcor_port_remove_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3514 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3517 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port
*mlxsw_sp_port
,
3520 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3521 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3523 mlxsw_reg_slcor_col_enable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3525 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3528 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port
*mlxsw_sp_port
,
3531 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3532 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3534 mlxsw_reg_slcor_col_disable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3536 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3539 static int mlxsw_sp_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
3540 struct net_device
*lag_dev
,
3543 struct mlxsw_sp_upper
*lag
;
3544 int free_lag_id
= -1;
3548 max_lag
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
);
3549 for (i
= 0; i
< max_lag
; i
++) {
3550 lag
= mlxsw_sp_lag_get(mlxsw_sp
, i
);
3551 if (lag
->ref_count
) {
3552 if (lag
->dev
== lag_dev
) {
3556 } else if (free_lag_id
< 0) {
3560 if (free_lag_id
< 0)
3562 *p_lag_id
= free_lag_id
;
3567 mlxsw_sp_master_lag_check(struct mlxsw_sp
*mlxsw_sp
,
3568 struct net_device
*lag_dev
,
3569 struct netdev_lag_upper_info
*lag_upper_info
,
3570 struct netlink_ext_ack
*extack
)
3574 if (mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
) != 0) {
3575 NL_SET_ERR_MSG_MOD(extack
, "Exceeded number of supported LAG devices");
3578 if (lag_upper_info
->tx_type
!= NETDEV_LAG_TX_TYPE_HASH
) {
3579 NL_SET_ERR_MSG_MOD(extack
, "LAG device using unsupported Tx type");
3585 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
3586 u16 lag_id
, u8
*p_port_index
)
3588 u64 max_lag_members
;
3591 max_lag_members
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
3593 for (i
= 0; i
< max_lag_members
; i
++) {
3594 if (!mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
)) {
3602 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
3603 struct net_device
*lag_dev
,
3604 struct netlink_ext_ack
*extack
)
3606 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3607 struct mlxsw_sp_upper
*lag
;
3612 err
= mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
);
3615 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
3616 if (!lag
->ref_count
) {
3617 err
= mlxsw_sp_lag_create(mlxsw_sp
, lag_id
);
3623 err
= mlxsw_sp_port_lag_index_get(mlxsw_sp
, lag_id
, &port_index
);
3626 err
= mlxsw_sp_lag_col_port_add(mlxsw_sp_port
, lag_id
, port_index
);
3628 goto err_col_port_add
;
3630 mlxsw_core_lag_mapping_set(mlxsw_sp
->core
, lag_id
, port_index
,
3631 mlxsw_sp_port
->local_port
);
3632 mlxsw_sp_port
->lag_id
= lag_id
;
3633 mlxsw_sp_port
->lagged
= 1;
3636 /* Port is no longer usable as a router interface */
3637 if (mlxsw_sp_port
->default_vlan
->fid
)
3638 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port
->default_vlan
);
3640 /* Join a router interface configured on the LAG, if exists */
3641 err
= mlxsw_sp_port_vlan_router_join(mlxsw_sp_port
->default_vlan
,
3644 goto err_router_join
;
3650 mlxsw_sp_port
->lagged
= 0;
3651 mlxsw_core_lag_mapping_clear(mlxsw_sp
->core
, lag_id
,
3652 mlxsw_sp_port
->local_port
);
3653 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
3655 if (!lag
->ref_count
)
3656 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
3660 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port
*mlxsw_sp_port
,
3661 struct net_device
*lag_dev
)
3663 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3664 u16 lag_id
= mlxsw_sp_port
->lag_id
;
3665 struct mlxsw_sp_upper
*lag
;
3667 if (!mlxsw_sp_port
->lagged
)
3669 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
3670 WARN_ON(lag
->ref_count
== 0);
3672 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
3674 /* Any VLANs configured on the port are no longer valid */
3675 mlxsw_sp_port_vlan_flush(mlxsw_sp_port
, false);
3676 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port
->default_vlan
);
3677 /* Make the LAG and its directly linked uppers leave bridges they
3680 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port
, lag_dev
);
3682 if (lag
->ref_count
== 1)
3683 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
3685 mlxsw_core_lag_mapping_clear(mlxsw_sp
->core
, lag_id
,
3686 mlxsw_sp_port
->local_port
);
3687 mlxsw_sp_port
->lagged
= 0;
3690 /* Make sure untagged frames are allowed to ingress */
3691 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, MLXSW_SP_DEFAULT_VID
,
3695 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
3698 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3699 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3701 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl
, lag_id
,
3702 mlxsw_sp_port
->local_port
);
3703 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3706 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
3709 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3710 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3712 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl
, lag_id
,
3713 mlxsw_sp_port
->local_port
);
3714 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3718 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port
*mlxsw_sp_port
)
3722 err
= mlxsw_sp_lag_col_port_enable(mlxsw_sp_port
,
3723 mlxsw_sp_port
->lag_id
);
3727 err
= mlxsw_sp_lag_dist_port_add(mlxsw_sp_port
, mlxsw_sp_port
->lag_id
);
3729 goto err_dist_port_add
;
3734 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port
, mlxsw_sp_port
->lag_id
);
3739 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port
*mlxsw_sp_port
)
3743 err
= mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port
,
3744 mlxsw_sp_port
->lag_id
);
3748 err
= mlxsw_sp_lag_col_port_disable(mlxsw_sp_port
,
3749 mlxsw_sp_port
->lag_id
);
3751 goto err_col_port_disable
;
3755 err_col_port_disable
:
3756 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port
, mlxsw_sp_port
->lag_id
);
3760 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port
*mlxsw_sp_port
,
3761 struct netdev_lag_lower_state_info
*info
)
3763 if (info
->tx_enabled
)
3764 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port
);
3766 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port
);
3769 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3772 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3773 enum mlxsw_reg_spms_state spms_state
;
3778 spms_state
= enable
? MLXSW_REG_SPMS_STATE_FORWARDING
:
3779 MLXSW_REG_SPMS_STATE_DISCARDING
;
3781 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
3784 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
3786 for (vid
= 0; vid
< VLAN_N_VID
; vid
++)
3787 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
3789 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
3794 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port
*mlxsw_sp_port
)
3799 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, true);
3802 err
= mlxsw_sp_port_stp_set(mlxsw_sp_port
, true);
3804 goto err_port_stp_set
;
3805 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 1, VLAN_N_VID
- 2,
3808 goto err_port_vlan_set
;
3810 for (; vid
<= VLAN_N_VID
- 1; vid
++) {
3811 err
= mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
,
3814 goto err_vid_learning_set
;
3819 err_vid_learning_set
:
3820 for (vid
--; vid
>= 1; vid
--)
3821 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, true);
3823 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
3825 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
3829 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
3833 for (vid
= VLAN_N_VID
- 1; vid
>= 1; vid
--)
3834 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
,
3837 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 1, VLAN_N_VID
- 2,
3839 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
3840 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
3843 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device
*br_dev
)
3845 unsigned int num_vxlans
= 0;
3846 struct net_device
*dev
;
3847 struct list_head
*iter
;
3849 netdev_for_each_lower_dev(br_dev
, dev
, iter
) {
3850 if (netif_is_vxlan(dev
))
3854 return num_vxlans
> 1;
3857 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device
*br_dev
)
3859 DECLARE_BITMAP(vlans
, VLAN_N_VID
) = {0};
3860 struct net_device
*dev
;
3861 struct list_head
*iter
;
3863 netdev_for_each_lower_dev(br_dev
, dev
, iter
) {
3867 if (!netif_is_vxlan(dev
))
3870 err
= mlxsw_sp_vxlan_mapped_vid(dev
, &pvid
);
3874 if (test_and_set_bit(pvid
, vlans
))
3881 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device
*br_dev
,
3882 struct netlink_ext_ack
*extack
)
3884 if (br_multicast_enabled(br_dev
)) {
3885 NL_SET_ERR_MSG_MOD(extack
, "Multicast can not be enabled on a bridge with a VxLAN device");
3889 if (!br_vlan_enabled(br_dev
) &&
3890 mlxsw_sp_bridge_has_multiple_vxlans(br_dev
)) {
3891 NL_SET_ERR_MSG_MOD(extack
, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
3895 if (br_vlan_enabled(br_dev
) &&
3896 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev
)) {
3897 NL_SET_ERR_MSG_MOD(extack
, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
3904 static int mlxsw_sp_netdevice_port_upper_event(struct net_device
*lower_dev
,
3905 struct net_device
*dev
,
3906 unsigned long event
, void *ptr
)
3908 struct netdev_notifier_changeupper_info
*info
;
3909 struct mlxsw_sp_port
*mlxsw_sp_port
;
3910 struct netlink_ext_ack
*extack
;
3911 struct net_device
*upper_dev
;
3912 struct mlxsw_sp
*mlxsw_sp
;
3916 mlxsw_sp_port
= netdev_priv(dev
);
3917 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3919 extack
= netdev_notifier_info_to_extack(&info
->info
);
3922 case NETDEV_PRECHANGEUPPER
:
3923 upper_dev
= info
->upper_dev
;
3924 if (!is_vlan_dev(upper_dev
) &&
3925 !netif_is_lag_master(upper_dev
) &&
3926 !netif_is_bridge_master(upper_dev
) &&
3927 !netif_is_ovs_master(upper_dev
) &&
3928 !netif_is_macvlan(upper_dev
)) {
3929 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
3934 if (netif_is_bridge_master(upper_dev
) &&
3935 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
, upper_dev
) &&
3936 mlxsw_sp_bridge_has_vxlan(upper_dev
) &&
3937 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev
, extack
))
3939 if (netdev_has_any_upper_dev(upper_dev
) &&
3940 (!netif_is_bridge_master(upper_dev
) ||
3941 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
,
3943 NL_SET_ERR_MSG_MOD(extack
, "Enslaving a port to a device that already has an upper device is not supported");
3946 if (netif_is_lag_master(upper_dev
) &&
3947 !mlxsw_sp_master_lag_check(mlxsw_sp
, upper_dev
,
3948 info
->upper_info
, extack
))
3950 if (netif_is_lag_master(upper_dev
) && vlan_uses_dev(dev
)) {
3951 NL_SET_ERR_MSG_MOD(extack
, "Master device is a LAG master and this device has a VLAN");
3954 if (netif_is_lag_port(dev
) && is_vlan_dev(upper_dev
) &&
3955 !netif_is_lag_master(vlan_dev_real_dev(upper_dev
))) {
3956 NL_SET_ERR_MSG_MOD(extack
, "Can not put a VLAN on a LAG port");
3959 if (netif_is_macvlan(upper_dev
) &&
3960 !mlxsw_sp_rif_exists(mlxsw_sp
, lower_dev
)) {
3961 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
3964 if (netif_is_ovs_master(upper_dev
) && vlan_uses_dev(dev
)) {
3965 NL_SET_ERR_MSG_MOD(extack
, "Master device is an OVS master and this device has a VLAN");
3968 if (netif_is_ovs_port(dev
) && is_vlan_dev(upper_dev
)) {
3969 NL_SET_ERR_MSG_MOD(extack
, "Can not put a VLAN on an OVS port");
3972 if (netif_is_bridge_master(upper_dev
)) {
3973 br_vlan_get_proto(upper_dev
, &proto
);
3974 if (br_vlan_enabled(upper_dev
) &&
3975 proto
!= ETH_P_8021Q
&& proto
!= ETH_P_8021AD
) {
3976 NL_SET_ERR_MSG_MOD(extack
, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
3979 if (vlan_uses_dev(lower_dev
) &&
3980 br_vlan_enabled(upper_dev
) &&
3981 proto
== ETH_P_8021AD
) {
3982 NL_SET_ERR_MSG_MOD(extack
, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
3986 if (netif_is_bridge_port(lower_dev
) && is_vlan_dev(upper_dev
)) {
3987 struct net_device
*br_dev
= netdev_master_upper_dev_get(lower_dev
);
3989 if (br_vlan_enabled(br_dev
)) {
3990 br_vlan_get_proto(br_dev
, &proto
);
3991 if (proto
== ETH_P_8021AD
) {
3992 NL_SET_ERR_MSG_MOD(extack
, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
3997 if (is_vlan_dev(upper_dev
) &&
3998 ntohs(vlan_dev_vlan_proto(upper_dev
)) != ETH_P_8021Q
) {
3999 NL_SET_ERR_MSG_MOD(extack
, "VLAN uppers are only supported with 802.1q VLAN protocol");
4003 case NETDEV_CHANGEUPPER
:
4004 upper_dev
= info
->upper_dev
;
4005 if (netif_is_bridge_master(upper_dev
)) {
4007 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
4012 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
,
4015 } else if (netif_is_lag_master(upper_dev
)) {
4016 if (info
->linking
) {
4017 err
= mlxsw_sp_port_lag_join(mlxsw_sp_port
,
4020 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port
);
4021 mlxsw_sp_port_lag_leave(mlxsw_sp_port
,
4024 } else if (netif_is_ovs_master(upper_dev
)) {
4026 err
= mlxsw_sp_port_ovs_join(mlxsw_sp_port
);
4028 mlxsw_sp_port_ovs_leave(mlxsw_sp_port
);
4029 } else if (netif_is_macvlan(upper_dev
)) {
4031 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
4032 } else if (is_vlan_dev(upper_dev
)) {
4033 struct net_device
*br_dev
;
4035 if (!netif_is_bridge_port(upper_dev
))
4039 br_dev
= netdev_master_upper_dev_get(upper_dev
);
4040 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
, upper_dev
,
4049 static int mlxsw_sp_netdevice_port_lower_event(struct net_device
*dev
,
4050 unsigned long event
, void *ptr
)
4052 struct netdev_notifier_changelowerstate_info
*info
;
4053 struct mlxsw_sp_port
*mlxsw_sp_port
;
4056 mlxsw_sp_port
= netdev_priv(dev
);
4060 case NETDEV_CHANGELOWERSTATE
:
4061 if (netif_is_lag_port(dev
) && mlxsw_sp_port
->lagged
) {
4062 err
= mlxsw_sp_port_lag_changed(mlxsw_sp_port
,
4063 info
->lower_state_info
);
4065 netdev_err(dev
, "Failed to reflect link aggregation lower state change\n");
4073 static int mlxsw_sp_netdevice_port_event(struct net_device
*lower_dev
,
4074 struct net_device
*port_dev
,
4075 unsigned long event
, void *ptr
)
4078 case NETDEV_PRECHANGEUPPER
:
4079 case NETDEV_CHANGEUPPER
:
4080 return mlxsw_sp_netdevice_port_upper_event(lower_dev
, port_dev
,
4082 case NETDEV_CHANGELOWERSTATE
:
4083 return mlxsw_sp_netdevice_port_lower_event(port_dev
, event
,
4090 static int mlxsw_sp_netdevice_lag_event(struct net_device
*lag_dev
,
4091 unsigned long event
, void *ptr
)
4093 struct net_device
*dev
;
4094 struct list_head
*iter
;
4097 netdev_for_each_lower_dev(lag_dev
, dev
, iter
) {
4098 if (mlxsw_sp_port_dev_check(dev
)) {
4099 ret
= mlxsw_sp_netdevice_port_event(lag_dev
, dev
, event
,
4109 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device
*vlan_dev
,
4110 struct net_device
*dev
,
4111 unsigned long event
, void *ptr
,
4114 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
4115 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4116 struct netdev_notifier_changeupper_info
*info
= ptr
;
4117 struct netlink_ext_ack
*extack
;
4118 struct net_device
*upper_dev
;
4121 extack
= netdev_notifier_info_to_extack(&info
->info
);
4124 case NETDEV_PRECHANGEUPPER
:
4125 upper_dev
= info
->upper_dev
;
4126 if (!netif_is_bridge_master(upper_dev
) &&
4127 !netif_is_macvlan(upper_dev
)) {
4128 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
4133 if (netif_is_bridge_master(upper_dev
) &&
4134 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
, upper_dev
) &&
4135 mlxsw_sp_bridge_has_vxlan(upper_dev
) &&
4136 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev
, extack
))
4138 if (netdev_has_any_upper_dev(upper_dev
) &&
4139 (!netif_is_bridge_master(upper_dev
) ||
4140 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
,
4142 NL_SET_ERR_MSG_MOD(extack
, "Enslaving a port to a device that already has an upper device is not supported");
4145 if (netif_is_macvlan(upper_dev
) &&
4146 !mlxsw_sp_rif_exists(mlxsw_sp
, vlan_dev
)) {
4147 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
4151 case NETDEV_CHANGEUPPER
:
4152 upper_dev
= info
->upper_dev
;
4153 if (netif_is_bridge_master(upper_dev
)) {
4155 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
4160 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
,
4163 } else if (netif_is_macvlan(upper_dev
)) {
4165 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
4176 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device
*vlan_dev
,
4177 struct net_device
*lag_dev
,
4178 unsigned long event
,
4181 struct net_device
*dev
;
4182 struct list_head
*iter
;
4185 netdev_for_each_lower_dev(lag_dev
, dev
, iter
) {
4186 if (mlxsw_sp_port_dev_check(dev
)) {
4187 ret
= mlxsw_sp_netdevice_port_vlan_event(vlan_dev
, dev
,
4198 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device
*vlan_dev
,
4199 struct net_device
*br_dev
,
4200 unsigned long event
, void *ptr
,
4203 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(vlan_dev
);
4204 struct netdev_notifier_changeupper_info
*info
= ptr
;
4205 struct netlink_ext_ack
*extack
;
4206 struct net_device
*upper_dev
;
4211 extack
= netdev_notifier_info_to_extack(&info
->info
);
4214 case NETDEV_PRECHANGEUPPER
:
4215 upper_dev
= info
->upper_dev
;
4216 if (!netif_is_macvlan(upper_dev
)) {
4217 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
4222 if (netif_is_macvlan(upper_dev
) &&
4223 !mlxsw_sp_rif_exists(mlxsw_sp
, vlan_dev
)) {
4224 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
4228 case NETDEV_CHANGEUPPER
:
4229 upper_dev
= info
->upper_dev
;
4232 if (netif_is_macvlan(upper_dev
))
4233 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
4240 static int mlxsw_sp_netdevice_vlan_event(struct net_device
*vlan_dev
,
4241 unsigned long event
, void *ptr
)
4243 struct net_device
*real_dev
= vlan_dev_real_dev(vlan_dev
);
4244 u16 vid
= vlan_dev_vlan_id(vlan_dev
);
4246 if (mlxsw_sp_port_dev_check(real_dev
))
4247 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev
, real_dev
,
4249 else if (netif_is_lag_master(real_dev
))
4250 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev
,
4253 else if (netif_is_bridge_master(real_dev
))
4254 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev
, real_dev
,
4260 static int mlxsw_sp_netdevice_bridge_event(struct net_device
*br_dev
,
4261 unsigned long event
, void *ptr
)
4263 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(br_dev
);
4264 struct netdev_notifier_changeupper_info
*info
= ptr
;
4265 struct netlink_ext_ack
*extack
;
4266 struct net_device
*upper_dev
;
4272 extack
= netdev_notifier_info_to_extack(&info
->info
);
4275 case NETDEV_PRECHANGEUPPER
:
4276 upper_dev
= info
->upper_dev
;
4277 if (!is_vlan_dev(upper_dev
) && !netif_is_macvlan(upper_dev
)) {
4278 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
4283 if (br_vlan_enabled(br_dev
)) {
4284 br_vlan_get_proto(br_dev
, &proto
);
4285 if (proto
== ETH_P_8021AD
) {
4286 NL_SET_ERR_MSG_MOD(extack
, "Uppers are not supported on top of an 802.1ad bridge");
4290 if (is_vlan_dev(upper_dev
) &&
4291 ntohs(vlan_dev_vlan_proto(upper_dev
)) != ETH_P_8021Q
) {
4292 NL_SET_ERR_MSG_MOD(extack
, "VLAN uppers are only supported with 802.1q VLAN protocol");
4295 if (netif_is_macvlan(upper_dev
) &&
4296 !mlxsw_sp_rif_exists(mlxsw_sp
, br_dev
)) {
4297 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
4301 case NETDEV_CHANGEUPPER
:
4302 upper_dev
= info
->upper_dev
;
4305 if (is_vlan_dev(upper_dev
))
4306 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp
, upper_dev
);
4307 if (netif_is_macvlan(upper_dev
))
4308 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
4315 static int mlxsw_sp_netdevice_macvlan_event(struct net_device
*macvlan_dev
,
4316 unsigned long event
, void *ptr
)
4318 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(macvlan_dev
);
4319 struct netdev_notifier_changeupper_info
*info
= ptr
;
4320 struct netlink_ext_ack
*extack
;
4322 if (!mlxsw_sp
|| event
!= NETDEV_PRECHANGEUPPER
)
4325 extack
= netdev_notifier_info_to_extack(&info
->info
);
4327 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
4328 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
4333 static bool mlxsw_sp_is_vrf_event(unsigned long event
, void *ptr
)
4335 struct netdev_notifier_changeupper_info
*info
= ptr
;
4337 if (event
!= NETDEV_PRECHANGEUPPER
&& event
!= NETDEV_CHANGEUPPER
)
4339 return netif_is_l3_master(info
->upper_dev
);
4342 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp
*mlxsw_sp
,
4343 struct net_device
*dev
,
4344 unsigned long event
, void *ptr
)
4346 struct netdev_notifier_changeupper_info
*cu_info
;
4347 struct netdev_notifier_info
*info
= ptr
;
4348 struct netlink_ext_ack
*extack
;
4349 struct net_device
*upper_dev
;
4351 extack
= netdev_notifier_info_to_extack(info
);
4354 case NETDEV_CHANGEUPPER
:
4355 cu_info
= container_of(info
,
4356 struct netdev_notifier_changeupper_info
,
4358 upper_dev
= cu_info
->upper_dev
;
4359 if (!netif_is_bridge_master(upper_dev
))
4361 if (!mlxsw_sp_lower_get(upper_dev
))
4363 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev
, extack
))
4365 if (cu_info
->linking
) {
4366 if (!netif_running(dev
))
4368 /* When the bridge is VLAN-aware, the VNI of the VxLAN
4369 * device needs to be mapped to a VLAN, but at this
4370 * point no VLANs are configured on the VxLAN device
4372 if (br_vlan_enabled(upper_dev
))
4374 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp
, upper_dev
,
4377 /* VLANs were already flushed, which triggered the
4380 if (br_vlan_enabled(upper_dev
))
4382 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp
, dev
);
4386 upper_dev
= netdev_master_upper_dev_get(dev
);
4389 if (!netif_is_bridge_master(upper_dev
))
4391 if (!mlxsw_sp_lower_get(upper_dev
))
4393 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp
, upper_dev
, dev
, 0,
4396 upper_dev
= netdev_master_upper_dev_get(dev
);
4399 if (!netif_is_bridge_master(upper_dev
))
4401 if (!mlxsw_sp_lower_get(upper_dev
))
4403 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp
, dev
);
4410 static int mlxsw_sp_netdevice_event(struct notifier_block
*nb
,
4411 unsigned long event
, void *ptr
)
4413 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
4414 struct mlxsw_sp_span_entry
*span_entry
;
4415 struct mlxsw_sp
*mlxsw_sp
;
4418 mlxsw_sp
= container_of(nb
, struct mlxsw_sp
, netdevice_nb
);
4419 if (event
== NETDEV_UNREGISTER
) {
4420 span_entry
= mlxsw_sp_span_entry_find_by_port(mlxsw_sp
, dev
);
4422 mlxsw_sp_span_entry_invalidate(mlxsw_sp
, span_entry
);
4424 mlxsw_sp_span_respin(mlxsw_sp
);
4426 if (netif_is_vxlan(dev
))
4427 err
= mlxsw_sp_netdevice_vxlan_event(mlxsw_sp
, dev
, event
, ptr
);
4428 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp
, dev
))
4429 err
= mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp
, dev
,
4431 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp
, dev
))
4432 err
= mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp
, dev
,
4434 else if (event
== NETDEV_PRE_CHANGEADDR
||
4435 event
== NETDEV_CHANGEADDR
||
4436 event
== NETDEV_CHANGEMTU
)
4437 err
= mlxsw_sp_netdevice_router_port_event(dev
, event
, ptr
);
4438 else if (mlxsw_sp_is_vrf_event(event
, ptr
))
4439 err
= mlxsw_sp_netdevice_vrf_event(dev
, event
, ptr
);
4440 else if (mlxsw_sp_port_dev_check(dev
))
4441 err
= mlxsw_sp_netdevice_port_event(dev
, dev
, event
, ptr
);
4442 else if (netif_is_lag_master(dev
))
4443 err
= mlxsw_sp_netdevice_lag_event(dev
, event
, ptr
);
4444 else if (is_vlan_dev(dev
))
4445 err
= mlxsw_sp_netdevice_vlan_event(dev
, event
, ptr
);
4446 else if (netif_is_bridge_master(dev
))
4447 err
= mlxsw_sp_netdevice_bridge_event(dev
, event
, ptr
);
4448 else if (netif_is_macvlan(dev
))
4449 err
= mlxsw_sp_netdevice_macvlan_event(dev
, event
, ptr
);
4451 return notifier_from_errno(err
);
4454 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly
= {
4455 .notifier_call
= mlxsw_sp_inetaddr_valid_event
,
4458 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly
= {
4459 .notifier_call
= mlxsw_sp_inet6addr_valid_event
,
4462 static const struct pci_device_id mlxsw_sp1_pci_id_table
[] = {
4463 {PCI_VDEVICE(MELLANOX
, PCI_DEVICE_ID_MELLANOX_SPECTRUM
), 0},
4467 static struct pci_driver mlxsw_sp1_pci_driver
= {
4468 .name
= mlxsw_sp1_driver_name
,
4469 .id_table
= mlxsw_sp1_pci_id_table
,
4472 static const struct pci_device_id mlxsw_sp2_pci_id_table
[] = {
4473 {PCI_VDEVICE(MELLANOX
, PCI_DEVICE_ID_MELLANOX_SPECTRUM2
), 0},
4477 static struct pci_driver mlxsw_sp2_pci_driver
= {
4478 .name
= mlxsw_sp2_driver_name
,
4479 .id_table
= mlxsw_sp2_pci_id_table
,
4482 static const struct pci_device_id mlxsw_sp3_pci_id_table
[] = {
4483 {PCI_VDEVICE(MELLANOX
, PCI_DEVICE_ID_MELLANOX_SPECTRUM3
), 0},
4487 static struct pci_driver mlxsw_sp3_pci_driver
= {
4488 .name
= mlxsw_sp3_driver_name
,
4489 .id_table
= mlxsw_sp3_pci_id_table
,
4492 static int __init
mlxsw_sp_module_init(void)
4496 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb
);
4497 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb
);
4499 err
= mlxsw_core_driver_register(&mlxsw_sp1_driver
);
4501 goto err_sp1_core_driver_register
;
4503 err
= mlxsw_core_driver_register(&mlxsw_sp2_driver
);
4505 goto err_sp2_core_driver_register
;
4507 err
= mlxsw_core_driver_register(&mlxsw_sp3_driver
);
4509 goto err_sp3_core_driver_register
;
4511 err
= mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver
);
4513 goto err_sp1_pci_driver_register
;
4515 err
= mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver
);
4517 goto err_sp2_pci_driver_register
;
4519 err
= mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver
);
4521 goto err_sp3_pci_driver_register
;
4525 err_sp3_pci_driver_register
:
4526 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver
);
4527 err_sp2_pci_driver_register
:
4528 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver
);
4529 err_sp1_pci_driver_register
:
4530 mlxsw_core_driver_unregister(&mlxsw_sp3_driver
);
4531 err_sp3_core_driver_register
:
4532 mlxsw_core_driver_unregister(&mlxsw_sp2_driver
);
4533 err_sp2_core_driver_register
:
4534 mlxsw_core_driver_unregister(&mlxsw_sp1_driver
);
4535 err_sp1_core_driver_register
:
4536 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb
);
4537 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb
);
4541 static void __exit
mlxsw_sp_module_exit(void)
4543 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver
);
4544 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver
);
4545 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver
);
4546 mlxsw_core_driver_unregister(&mlxsw_sp3_driver
);
4547 mlxsw_core_driver_unregister(&mlxsw_sp2_driver
);
4548 mlxsw_core_driver_unregister(&mlxsw_sp1_driver
);
4549 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb
);
4550 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb
);
4553 module_init(mlxsw_sp_module_init
);
4554 module_exit(mlxsw_sp_module_exit
);
4556 MODULE_LICENSE("Dual BSD/GPL");
4557 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4558 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4559 MODULE_DEVICE_TABLE(pci
, mlxsw_sp1_pci_id_table
);
4560 MODULE_DEVICE_TABLE(pci
, mlxsw_sp2_pci_id_table
);
4561 MODULE_DEVICE_TABLE(pci
, mlxsw_sp3_pci_id_table
);
4562 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME
);
4563 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME
);
4564 MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME
);