// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/phy/phy.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define SPX5_ETYPE_TAG_C     0x8100
#define SPX5_ETYPE_TAG_S     0x88a8

#define SPX5_WAIT_US         1000
#define SPX5_WAIT_MAX_US     2000

enum port_error {
	SPX5_PERR_SPEED,
	SPX5_PERR_IFTYPE,
};

#define PAUSE_DISCARD        0xC
#define ETH_MAXLEN           (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
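/* With the standard <linux/if_ether.h> sizes (1500 byte payload, 14 byte
 * header, 4 byte FCS), ETH_MAXLEN works out to 1518 bytes.
 */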
static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
{
	status->an_complete = true;
	if (!(lp_abil & LPA_SGMII_LINK)) {
		status->link = false;
		return;
	}

	switch (lp_abil & LPA_SGMII_SPD_MASK) {
	case LPA_SGMII_10:
		status->speed = SPEED_10;
		break;
	case LPA_SGMII_100:
		status->speed = SPEED_100;
		break;
	case LPA_SGMII_1000:
		status->speed = SPEED_1000;
		break;
	default:
		status->link = false;
		return;
	}
	if (lp_abil & LPA_SGMII_FULL_DUPLEX)
		status->duplex = DUPLEX_FULL;
	else
		status->duplex = DUPLEX_HALF;
}
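/* Illustrative example: a link-partner word of 0x9800 has LPA_SGMII_LINK
 * (bit 15), LPA_SGMII_1000 in the speed field and LPA_SGMII_FULL_DUPLEX set,
 * so the decoder above reports a 1000 Mbps full-duplex link with a completed
 * autoneg.
 */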
static void decode_cl37_word(u16 lp_abil, uint16_t ld_abil, struct sparx5_port_status *status)
{
	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
	status->an_complete = true;
	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
		DUPLEX_FULL : DUPLEX_UNKNOWN; // 1G HDX not supported

	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
	    (lp_abil & ADVERTISE_1000XPAUSE)) {
		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_TX : 0;
		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_RX : 0;
	} else {
		status->pause = MLO_PAUSE_NONE;
	}
}
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
{
	u32 portno = port->portno;
	u16 lp_adv, ld_adv;
	u32 value;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			decode_sgmii_word(lp_adv, status);
		} else {
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
		}
	}

	return 0;
}
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
{
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;
	void __iomem *inst;

	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");
		return -EINVAL;
	}

	dev = sparx5_to_high_dev(sparx5, portno);
	tinst = sparx5_port_dev_index(sparx5, portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	}

	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
	else
		status->speed = SPEED_25000;

	return 0;
}
/* Get link status of 1000Base-X/in-band and SFI ports.
 */
int sparx5_get_port_status(struct sparx5 *sparx5,
			   struct sparx5_port *port,
			   struct sparx5_port_status *status)
{
	memset(status, 0, sizeof(*status));
	status->speed = port->conf.speed;
	if (port->conf.power_down) {
		status->link = false;
		return 0;
	}
	switch (port->conf.portmode) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_QSGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		return sparx5_get_dev2g5_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_25GBASER:
		return sparx5_get_sfi_status(sparx5, port, status);
	case PHY_INTERFACE_MODE_NA:
		return 0;
	default:
		netdev_err(port->ndev, "Status not supported");
		return 0;
	}
	return 0;
}
static int sparx5_port_error(struct sparx5_port *port,
			     struct sparx5_port_config *conf,
			     enum port_error errtype)
{
	switch (errtype) {
	case SPX5_PERR_SPEED:
		netdev_err(port->ndev,
			   "Interface does not support speed: %u: for %s\n",
			   conf->speed, phy_modes(conf->portmode));
		break;
	case SPX5_PERR_IFTYPE:
		netdev_err(port->ndev,
			   "Switch port does not support interface type: %s\n",
			   phy_modes(conf->portmode));
		break;
	default:
		netdev_err(port->ndev,
			   "Interface configuration error\n");
	}

	return -EINVAL;
}
static int sparx5_port_verify_speed(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	const struct sparx5_ops *ops = sparx5->data->ops;

	if ((ops->is_port_2g5(port->portno) &&
	     conf->speed > SPEED_2500) ||
	    (ops->is_port_5g(port->portno) &&
	     conf->speed > SPEED_5000) ||
	    (ops->is_port_10g(port->portno) &&
	     conf->speed > SPEED_10000))
		return sparx5_port_error(port, conf, SPX5_PERR_SPEED);

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_NA:
		return -EINVAL;
	case PHY_INTERFACE_MODE_1000BASEX:
		if (conf->speed != SPEED_1000 ||
		    ops->is_port_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		if (ops->is_port_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		if (conf->speed != SPEED_2500 ||
		    ops->is_port_2g5(port->portno))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	case PHY_INTERFACE_MODE_QSGMII:
		if (port->portno > 47)
			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
		fallthrough;
	case PHY_INTERFACE_MODE_SGMII:
		if (conf->speed != SPEED_1000 &&
		    conf->speed != SPEED_100 &&
		    conf->speed != SPEED_10 &&
		    conf->speed != SPEED_2500)
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_25GBASER:
		if ((conf->speed != SPEED_5000 &&
		     conf->speed != SPEED_10000 &&
		     conf->speed != SPEED_25000))
			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
		break;
	default:
		return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
	}

	return 0;
}
static bool sparx5_dev_change(struct sparx5 *sparx5,
			      struct sparx5_port *port,
			      struct sparx5_port_config *conf)
{
	return sparx5_is_baser(port->conf.portmode) ^
	       sparx5_is_baser(conf->portmode);
}
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
	u32 value, resource, prio, delay_cnt = 0;
	bool poll_src = true;
	char *mem = "";

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination. (DST-REF)
	 */
	while (1) {
		bool empty = true;

		for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
			u32 base;

			base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
			for (prio = 0; prio < SPX5_PRIOS; prio++) {
				value = spx5_rd(sparx5,
						QRES_RES_STAT(base + prio));
				if (value) {
					mem = resource == 0 ?
						"DST-MEM" : "SRC-MEM";
					empty = false;
				}
			}
		}

		if (empty)
			break;

		if (delay_cnt++ == 2000) {
			dev_err(sparx5->dev,
				"Flush timeout port %u. %s queue not empty\n",
				portno, mem);
			return -EINVAL;
		}

		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
	}
	return 0;
}
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(sparx5, port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 spd = port->conf.speed;
	u32 spd_prm;
	int err;

	if (high_spd_dev) {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV10G_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV10G_MAC_ENA_CFG(0));
	} else {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV2G5_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV2G5_MAC_ENA_CFG(0));
	}
	/* 3: Disable traffic being sent to or from switch port->portno */
	spx5_rmw(0,
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues associated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	/* 8: Enable dequeuing from the egress queues */
	spx5_rmw(0,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);
	if (err)
		return err;

	/* 10: Reset the MAC clock domain */
	if (high_spd_dev) {
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
			      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));
	} else {
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
			      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
	}
	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	if (high_spd_dev) {
		u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25 BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));

		if (ops->is_port_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 sparx5,
				 DEV25G_PCS25G_CFG(tinst));
	} else {
		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 sparx5,
			 DEV2G5_PCS1G_CFG(port->portno));
	}

	/* The port is now flushed and disabled */
	return 0;
}
static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
			       u32 portno, u32 speed)
{
	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
	const u32 taxi_dist[SPX5_PORTS_ALL] = {
		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
		4, 4, 4, 4,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		4, 6, 8, 4, 6, 8, 6, 8,
		2, 2, 2, 2, 2, 2, 2, 4, 2
	};
	u32 mac_per = 6400, tmp1, tmp2, tmp3;
	u32 fifo_width = 16;
	u32 mac_width = 8;
	u32 addition = 0;

	if (!is_sparx5(sparx5))
		return 0;

	switch (speed) {
	case SPEED_25000:
		return 0;
	case SPEED_10000:
		mac_per = 6400;
		mac_width = 8;
		addition = 1;
		break;
	case SPEED_5000:
		mac_per = 12800;
		mac_width = 8;
		addition = 0;
		break;
	case SPEED_2500:
		mac_per = 3200;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_1000:
		mac_per = 8000;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_100:
	case SPEED_10:
		return 1;
	default:
		break;
	}

	tmp1 = 1000 * mac_width / fifo_width;
	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
		       * sys_clk / mac_per);
	tmp3 = tmp1 * tmp2 / 1000;
	return (tmp3 + 2000 + 999) / 1000 + addition;
}
/* Configure port muxing:
 * QSGMII:     4x2G5 devices
 */
int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
			struct sparx5_port_config *conf)
{
	u32 portno = port->portno;
	u32 inst;

	if (port->conf.portmode == conf->portmode)
		return 0; /* Nothing to do */

	switch (conf->portmode) {
	case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
		inst = (portno - portno % 4) / 4;
		spx5_rmw(BIT(inst),
			 BIT(inst),
			 sparx5,
			 PORT_CONF_QSGMII_ENA);

		if ((portno / 4 % 2) == 0) {
			/* Affects d0-d3,d8-d11..d40-d43 */
			spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
				 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
				 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
				 PORT_CONF_USGMII_CFG_QUAD_MODE,
				 sparx5,
				 PORT_CONF_USGMII_CFG((portno / 8)));
		}
		break;
	default:
		break;
	}
	return 0;
}
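/* Worked example (illustrative): for portno 10 the QSGMII instance is
 * (10 - 10 % 4) / 4 = 2, so BIT(2) is set in PORT_CONF_QSGMII_ENA, and since
 * 10 / 4 % 2 == 0 the quad/scrambler-bypass configuration above is applied
 * to PORT_CONF_USGMII_CFG(10 / 8) = PORT_CONF_USGMII_CFG(1).
 */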
static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
{
	enum sparx5_port_max_tags max_tags = port->max_vlan_tags;
	int tag_ct = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
		     max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type = port->vlan_type;
	bool dotag = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev = sparx5_to_high_dev(sparx5, port->portno);
	u32 tinst = sparx5_port_dev_index(sparx5, port->portno);
	void __iomem *inst = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 etype;

	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 port->custom_etype :
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		sparx5,
		DEV2G5_MAC_TAGS_CFG(port->portno));

	if (ops->is_port_2g5(port->portno))
		return 0;

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      inst,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      inst,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      inst,
		      DEV10G_MAC_MAXLEN_CFG(0));
	return 0;
}
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
{
	u32 clk_period_ps = 1600; /* 625Mhz for now */
	u32 urg = 672000;

	switch (speed) {
	case SPEED_10:
	case SPEED_100:
	case SPEED_1000:
		urg = 672000;
		break;
	case SPEED_2500:
		urg = 270000;
		break;
	case SPEED_5000:
		urg = 135000;
		break;
	case SPEED_10000:
		urg = 67200;
		break;
	case SPEED_25000:
		urg = 27000;
		break;
	}
	return urg / clk_period_ps - 1;
}
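/* Illustration with a hypothetical urgency of 160000 ps: at the assumed
 * 1600 ps clock period the value programmed into FWD_URGENCY by
 * sparx5_port_config() would be 160000 / 1600 - 1 = 99.
 */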
static u16 sparx5_wm_enc(u16 value)
{
	if (value >= 2048)
		return 2048 + value / 16;

	return value;
}
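/* Example: watermarks below 2048 cells are encoded 1:1, while e.g. 3000
 * cells encode as 2048 + 3000 / 16 = 2235, i.e. large values are stored in
 * units of 16 cells (the multi-resolution encoding used by the QSYS
 * watermarks).
 */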
static int sparx5_port_fc_setup(struct sparx5 *sparx5,
				struct sparx5_port *port,
				struct sparx5_port_config *conf)
{
	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

	if (conf->pause & MLO_PAUSE_TX)
		pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
						SPX5_BUFFER_CELL_SZ));

	/* Set HDX flowcontrol */
	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
		 DSM_MAC_CFG_HDX_BACKPREASSURE,
		 sparx5,
		 DSM_MAC_CFG(port->portno));

	/* Obey flowcontrol */
	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
		 sparx5,
		 DSM_RX_PAUSE_CFG(port->portno));

	/* Disable forward pressure */
	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
		 sparx5,
		 QSYS_FWD_PRESSURE(port->portno));

	/* Generate pause frames */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	return 0;
}
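/* Illustration, assuming SPX5_BUFFER_CELL_SZ is 184 bytes: with pause TX
 * enabled the stop watermark above becomes
 * sparx5_wm_enc(4 * (1518 / 184)) = 32 cells, which is below 2048 and is
 * therefore encoded unchanged.
 */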
static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
{
	if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
		return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
	else
		return 1; /* Enable SGMII Aneg */
}
int sparx5_serdes_set(struct sparx5 *sparx5,
		      struct sparx5_port *port,
		      struct sparx5_port_config *conf)
{
	int portmode, err, speed = conf->speed;

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
	    ((port->portno % 4) != 0)) {
		return 0;
	}
	if (sparx5_is_baser(conf->portmode)) {
		if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
			speed = SPEED_25000;
		else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
			speed = SPEED_10000;
		else
			speed = SPEED_5000;
	}

	err = phy_set_media(port->serdes, conf->media);
	if (err)
		return err;
	if (speed > 0) {
		err = phy_set_speed(port->serdes, speed);
		if (err)
			return err;
	}
	if (conf->serdes_reset) {
		err = phy_reset(port->serdes);
		if (err)
			return err;
	}

	/* Configure SerDes with port parameters
	 * For BaseR, the serdes driver supports 10GGBASE-R and speed 5G/10G/25G
	 */
	portmode = conf->portmode;
	if (sparx5_is_baser(conf->portmode))
		portmode = PHY_INTERFACE_MODE_10GBASER;
	err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
	if (err)
		return err;
	conf->serdes_reset = false;
	return err;
}
static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
				   struct sparx5_port *port,
				   struct sparx5_port_config *conf)
{
	bool sgmii = false, inband_aneg = false;
	int err;

	if (conf->inband) {
		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
		else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
			 conf->autoneg)
			inband_aneg = true; /* Clause-37 in-band-aneg */

		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return -EINVAL;
	} else {
		sgmii = true; /* Phy is connected to the MAC */
	}

	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
		 sparx5,
		 DEV2G5_PCS1G_MODE_CFG(port->portno));

	/* Enable PCS */
	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
		sparx5,
		DEV2G5_PCS1G_CFG(port->portno));

	if (inband_aneg) {
		u16 abil = sparx5_get_aneg_word(conf);

		/* Enable in-band aneg */
		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
			sparx5,
			DEV2G5_PCS1G_ANEG_CFG(port->portno));
	} else {
		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
	}

	/* Take PCS out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	return 0;
}
static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 dev = sparx5_to_high_dev(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, dev, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* SFI : No in-band-aneg. Speeds 5G/10G/25G */
	err = sparx5_serdes_set(sparx5, port, conf);
	if (err)
		return -EINVAL;

	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
		/* Enable PCS for 25G device, speed 25G */
		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
			 DEV25G_PCS25G_CFG_PCS25G_ENA,
			 sparx5,
			 DEV25G_PCS25G_CFG(pix));
	} else {
		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));
	}

	/* Enable 5G/10G/25G MAC module */
	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
		     devinst,
		     DEV10G_MAC_ENA_CFG(0));

	/* Take the device out of reset */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
		      devinst,
		      DEV10G_DEV_RST_CTRL(0));

	return 0;
}
/* Switch between 1G/2500 and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 bt_indx;

	bt_indx = BIT(ops->get_port_dev_bit(sparx5, port));

	if (ops->is_port_5g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV5G_MODES);
	} else if (ops->is_port_10g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV10G_MODES);
	} else if (ops->is_port_25g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV25G_MODES);
	}
}
/* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
{
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	/* GIG/FDX mode */
	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 sparx5,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		sparx5,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	/* Enable PHAD_CTRL for better timestamping */
	if (!is_sparx5(sparx5)) {
		for (int i = 0; i < 2; ++i) {
			/* Divide the port clock by three for the two
			 * phase detection registers.
			 */
			spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
				 DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
				 DEV2G5_PHAD_CTRL_DIV_CFG |
				 DEV2G5_PHAD_CTRL_PHAD_ENA,
				 sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
		}
	}

	return 0;
}
int sparx5_port_pcs_set(struct sparx5 *sparx5,
			struct sparx5_port *port,
			struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err;

	if (sparx5_dev_change(sparx5, port, conf)) {
		/* switch device */
		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);

		/* Disable the not-in-use device */
		err = sparx5_port_disable(sparx5, port, !high_speed_dev);
		if (err)
			return err;
	}
	/* Disable the port before re-configuring */
	err = sparx5_port_disable(sparx5, port, high_speed_dev);
	if (err)
		return -EINVAL;

	if (high_speed_dev)
		err = sparx5_port_pcs_high_set(sparx5, port, conf);
	else
		err = sparx5_port_pcs_low_set(sparx5, port, conf);
	if (err)
		return -EINVAL;

	if (port->conf.inband) {
		/* Enable/disable 1G counters in ASM */
		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 ASM_PORT_CFG_CSC_STAT_DIS,
			 sparx5,
			 ASM_PORT_CFG(port->portno));

		/* Enable/disable 1G counters in DSM */
		spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 DSM_BUF_CFG_CSC_STAT_DIS,
			 sparx5,
			 DSM_BUF_CFG(port->portno));
	}

	port->conf = *conf;

	return 0;
}
int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	const struct sparx5_ops *ops = sparx5->data->ops;
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);
	if (err)
		return err;

	/* high speed device is already configured */
	if (!high_speed_dev)
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);
	if (err)
		return err;

	if (!is_sparx5(sparx5) && ops->is_port_10g(port->portno) &&
	    conf->speed < SPEED_10000)
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Set the DSM stop watermark */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 sparx5,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
	port->conf = *conf;

	return 0;
}
/* Initialize port config to default */
int sparx5_port_init(struct sparx5 *sparx5,
		     struct sparx5_port *port,
		     struct sparx5_port_config *conf)
{
	u32 pause_start = sparx5_wm_enc(6 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 devhigh = sparx5_to_high_dev(sparx5, port->portno);
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	bool sd_pol = port->signd_active_high;
	bool sd_sel = !port->signd_internal;
	bool sd_ena = port->signd_enable;
	u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, devhigh, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* Set the mux port mode */
	err = ops->set_port_mux(sparx5, port, conf);
	if (err)
		return err;

	/* Configure MAC vlan awareness */
	err = sparx5_port_max_tags_set(sparx5, port);
	if (err)
		return err;

	/* Set Max Length */
	spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
		 sparx5,
		 DEV2G5_MAC_MAXLEN_CFG(port->portno));

	/* 1G/2G5: Signal Detect configuration */
	spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
		DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
		DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
		sparx5,
		DEV2G5_PCS1G_SD_CFG(port->portno));

	/* Set Pause WM hysteresis */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
		 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
		 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
		 QSYS_PAUSE_CFG_PAUSE_START |
		 QSYS_PAUSE_CFG_PAUSE_STOP |
		 QSYS_PAUSE_CFG_PAUSE_ENA,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Port ATOP. Frames are tail dropped when this WM is hit */
	spx5_wr(QSYS_ATOP_ATOP_SET(atop),
		sparx5,
		QSYS_ATOP(port->portno));

	/* Discard pause frame 01-80-C2-00-00-01 */
	spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

	/* Discard SMAC multicast */
	spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
		 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
		 sparx5, ANA_CL_FILTER_CTRL(port->portno));

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_SGMII) {
		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return err;

		if (!ops->is_port_2g5(port->portno))
			/* Enable shadow device */
			spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
				 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
				 sparx5,
				 DSM_DEV_TX_STOP_WM_CFG(port->portno));

		sparx5_dev_switch(sparx5, port->portno, false);
	}
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
		// All ports must be PCS enabled in QSGMII mode
		spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
			 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
			 sparx5,
			 DEV2G5_DEV_RST_CTRL(port->portno));
	}
	/* Default IFGs for 1G */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	if (ops->is_port_2g5(port->portno))
		return 0; /* Low speed device only - return */

	/* Now setup the high speed device */
	if (conf->portmode == PHY_INTERFACE_MODE_NA)
		conf->portmode = PHY_INTERFACE_MODE_10GBASER;

	if (sparx5_is_baser(conf->portmode))
		sparx5_dev_switch(sparx5, port->portno, true);

	/* Set Max Length */
	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
		      devinst,
		      DEV10G_MAC_ENA_CFG(0));

	/* Handle Signal Detect in 10G PCS */
	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
		     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
		     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
		     pcsinst,
		     PCS10G_BR_PCS_SD_CFG(0));

	if (ops->is_port_25g(port->portno)) {
		/* Handle Signal Detect in 25G PCS */
		spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
			DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
			DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
			sparx5,
			DEV25G_PCS25G_SD_CFG(pix));
	}

	if (!is_sparx5(sparx5)) {
		void __iomem *inst;
		u32 dev, tinst;

		if (ops->is_port_10g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV10G_PTP_STAMPER_CFG(port->portno));
		} else if (ops->is_port_5g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV5G_PTP_STAMPER_CFG(port->portno));
		}
	}

	return 0;
}
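/* For reference, assuming SPX5_BUFFER_CELL_SZ is 184 bytes: pause_start
 * becomes sparx5_wm_enc(6 * (1518 / 184)) = 48 cells and atop becomes
 * sparx5_wm_enc(20 * (1518 / 184)) = 160 cells; both are below the 2048-cell
 * threshold and are therefore encoded 1:1.
 */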
void sparx5_port_enable(struct sparx5_port *port, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Enable port for frame transfer? */
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));
}
int sparx5_port_qos_set(struct sparx5_port *port,
			struct sparx5_port_qos *qos)
{
	sparx5_port_qos_dscp_set(port, &qos->dscp);
	sparx5_port_qos_pcp_set(port, &qos->pcp);
	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
	sparx5_port_qos_default_set(port, qos);

	return 0;
}
int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
				 struct sparx5_port_qos_pcp_rewr *qos)
{
	int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
	struct sparx5 *sparx5 = port->sparx5;
	u8 pcp, dei;

	/* Use mapping table, with classified QoS as index, to map QoS and DP
	 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
	 * PCP. Classified PCP equals frame PCP.
	 */
	if (qos->enable)
		mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;

	spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
		 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
		 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
		 port->sparx5, REW_TAG_CTRL(port->portno));

	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		/* Extract PCP and DEI */
		pcp = qos->map.map[i];
		if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
			dei = 1;
		else
			dei = 0;

		/* Rewrite PCP and DEI, for each classified QoS class and DP
		 * level. This table is only used if tag ctrl mode is set to
		 * 'mapped'.
		 *
		 * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0
		 * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
		 */
		if (dei) {
			spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
				 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
				 REW_PCP_MAP_DE1(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
				 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
				 REW_DEI_MAP_DE1(port->portno, i));
		} else {
			spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
				 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
				 REW_PCP_MAP_DE0(port->portno, i));

			spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
				 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
				 REW_DEI_MAP_DE0(port->portno, i));
		}
	}

	return 0;
}
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
			    struct sparx5_port_qos_pcp *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 *pcp_itr = qos->map.map;
	u8 pcp, dp;
	int i;

	/* Enable/disable pcp and dp for qos classification. */
	spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
		 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Map each pcp and dei value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		pcp = *(pcp_itr + i);
		dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
		spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
			 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
	}

	return 0;
}
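/* Example (illustrative, assuming SPARX5_PORT_QOS_PCP_COUNT is 8): the map
 * above has one entry per PCP/DEI pair, so entries 0-7 are taken as DEI 0 and
 * entries 8-15 as DEI 1; entry 11 therefore carries the priority for PCP 3
 * with DEI 1 and is classified to drop precedence 1.
 */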
void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
					int mode)
{
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
		 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
		 ANA_CL_QOS_CFG(port->portno));
}
int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
				  struct sparx5_port_qos_dscp_rewr *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	bool rewr = false;
	u16 dscp;
	int i;

	/* On egress, rewrite DSCP value to either classified DSCP or frame
	 * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
	 */
	if (qos->enable)
		rewr = true;

	spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
		 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
		 REW_DSCP_MAP(port->portno));

	/* On ingress, map each classified QoS class and DP to classified DSCP
	 * value. This mapping table is global for all ports.
	 */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		dscp = qos->map.map[i];
		spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
			 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
			 ANA_CL_QOS_MAP_CFG(i));
	}

	return 0;
}
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
			     struct sparx5_port_qos_dscp *qos)
{
	struct sparx5 *sparx5 = port->sparx5;
	u8 *dscp = qos->map.map;
	int i;

	/* Enable/disable dscp and dp for qos classification.
	 * Disable rewrite of dscp values for now.
	 */
	spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
		 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
		 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
		 ANA_CL_QOS_CFG(port->portno));

	/* Map each dscp value to priority and dp */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
			 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
			 ANA_CL_DSCP_CFG(i));
	}

	/* Set per-dscp trust */
	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
		if (qos->qos_enable) {
			spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
				 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
				 ANA_CL_DSCP_CFG(i));
		}
	}

	return 0;
}
int sparx5_port_qos_default_set(const struct sparx5_port *port,
				const struct sparx5_port_qos *qos)
{
	struct sparx5 *sparx5 = port->sparx5;

	/* Set default prio and dp level */
	spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
		 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
		 sparx5, ANA_CL_QOS_CFG(port->portno));

	/* Set default pcp and dei for untagged frames */
	spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
		 ANA_CL_VLAN_CTRL_PORT_PCP |
		 ANA_CL_VLAN_CTRL_PORT_DEI,
		 sparx5, ANA_CL_VLAN_CTRL(port->portno));

	return 0;
}
int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
{
	return sparx5->data->consts->n_ports + port;
}
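/* Usage note: the internal (CPU) ports are numbered after the front ports,
 * so with the Sparx5 constants (65 front ports) internal port 0 is reported
 * as chip port 65. The exact count comes from the per-SKU match data.
 */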