// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/device.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
/* QSYS calendar information */
#define SPX5_PORTS_PER_CALREG 10 /* Ports mapped in a calendar register */
#define SPX5_CALBITS_PER_PORT 3  /* Bits per port in calendar register */

/* DSM calendar information */
#define SPX5_DSM_CAL_TAXIS 8
#define SPX5_DSM_CAL_BW_LOSS 553

#define SPX5_TAXI_PORT_MAX 70

#define SPEED_12500 12500
/* Maps from taxis to port numbers */
static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
	{57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
	{58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
	{59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
	{60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
	{61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};
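
/* Return the per-variant target bandwidth in Mbps for the detected device.
 * sparx5_config_auto_calendar() checks the summed front-port bandwidth
 * against this value.
 */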
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
	switch (sparx5->target_ct) {
	case SPX5_TARGET_CT_7546:
	case SPX5_TARGET_CT_7546TSN:
		return 65000;
	case SPX5_TARGET_CT_7549:
	case SPX5_TARGET_CT_7549TSN:
		return 91000;
	case SPX5_TARGET_CT_7552:
	case SPX5_TARGET_CT_7552TSN:
		return 129000;
	case SPX5_TARGET_CT_7556:
	case SPX5_TARGET_CT_7556TSN:
		return 161000;
	case SPX5_TARGET_CT_7558:
	case SPX5_TARGET_CT_7558TSN:
		return 201000;
	case SPX5_TARGET_CT_LAN9691VAO:
		return 46000;
	case SPX5_TARGET_CT_LAN9694RED:
	case SPX5_TARGET_CT_LAN9694TSN:
	case SPX5_TARGET_CT_LAN9694:
		return 68000;
	case SPX5_TARGET_CT_LAN9696RED:
	case SPX5_TARGET_CT_LAN9696TSN:
	case SPX5_TARGET_CT_LAN9692VAO:
	case SPX5_TARGET_CT_LAN9696:
		return 88000;
	case SPX5_TARGET_CT_LAN9698RED:
	case SPX5_TARGET_CT_LAN9698TSN:
	case SPX5_TARGET_CT_LAN9693VAO:
	case SPX5_TARGET_CT_LAN9698:
		return 101000;
	default:
		return 0;
	}
}
static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
{
	switch (cclock) {
	case SPX5_CORE_CLOCK_250MHZ: return 83000;  /* 250000 / 3 */
	case SPX5_CORE_CLOCK_328MHZ: return 109375; /* 328000 / 3 */
	case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
	case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
	default: return 0;
	}
}
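
/* Translate a calendar speed setting to a bandwidth value in Mbps */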
u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
{
	switch (speed) {
	case SPX5_CAL_SPEED_1G:   return 1000;
	case SPX5_CAL_SPEED_2G5:  return 2500;
	case SPX5_CAL_SPEED_5G:   return 5000;
	case SPX5_CAL_SPEED_10G:  return 10000;
	case SPX5_CAL_SPEED_25G:  return 25000;
	case SPX5_CAL_SPEED_0G5:  return 500;
	case SPX5_CAL_SPEED_12G5: return 12500;
	default: return 0;
	}
}
EXPORT_SYMBOL_GPL(sparx5_cal_speed_to_value);
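
/* Translate an ethtool SPEED_* port bandwidth to a calendar speed setting */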
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
	switch (bw) {
	case SPEED_10:      return SPX5_CAL_SPEED_0G5;
	case SPEED_100:     return SPX5_CAL_SPEED_0G5;
	case SPEED_1000:    return SPX5_CAL_SPEED_1G;
	case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
	case SPEED_5000:    return SPX5_CAL_SPEED_5G;
	case SPEED_10000:   return SPX5_CAL_SPEED_10G;
	case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
	case SPEED_25000:   return SPX5_CAL_SPEED_25G;
	case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
	default:            return SPX5_CAL_SPEED_NONE;
	}
}
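
/* Return the calendar speed to use for a port: the CPU ports and virtual
 * devices get fixed values, front ports use their configured bandwidth.
 */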
enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno)
{
	struct sparx5_port *port;

	if (portno >= sparx5->data->consts->n_ports) {
		/* Internal ports */
		if (portno ==
		    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0) ||
		    portno ==
		    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1)) {
			return SPX5_CAL_SPEED_2G5;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD0)) {
			/* IPMC only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD1)) {
			/* OAM only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD2)) {
			/* IPinIP gets only idle BW */
			return SPX5_CAL_SPEED_NONE;
		}
		/* not in port map */
		return SPX5_CAL_SPEED_NONE;
	}
	/* Front ports - may be used */
	port = sparx5->ports[portno];
	if (!port)
		return SPX5_CAL_SPEED_NONE;
	return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}
EXPORT_SYMBOL_GPL(sparx5_get_port_cal_speed);
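
/* Each port occupies SPX5_CALBITS_PER_PORT bits in one of the QSYS_CAL_AUTO
 * registers (SPX5_PORTS_PER_CALREG ports per register); the value programmed
 * per port is its sparx5_cal_bw speed code.
 */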
/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	u32 cal[7], value, idx, portno;
	u32 max_core_bw;
	u32 total_bw = 0, used_port_bw = 0;
	int err = 0;
	enum sparx5_cal_bw spd;

	memset(cal, 0, sizeof(cal));

	max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
	if (max_core_bw == 0) {
		dev_err(sparx5->dev, "Core clock not supported");
		return -EINVAL;
	}

	/* Setup the calendar with the bandwidth to each port */
	for (portno = 0; portno < consts->n_ports_all; portno++) {
		u64 reg, offset, this_bw;

		spd = sparx5_get_port_cal_speed(sparx5, portno);
		if (spd == SPX5_CAL_SPEED_NONE)
			continue;

		this_bw = sparx5_cal_speed_to_value(spd);
		if (portno < consts->n_ports)
			used_port_bw += this_bw;
		else
			/* Internal ports are granted half the value */
			this_bw = this_bw / 2;
		total_bw += this_bw;
		reg = portno;
		offset = do_div(reg, SPX5_PORTS_PER_CALREG);
		cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
	}

	if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
		dev_err(sparx5->dev,
			"Port BW %u above target BW %u\n",
			used_port_bw, sparx5_target_bandwidth(sparx5));
		return -EINVAL;
	}

	if (total_bw > max_core_bw) {
		dev_err(sparx5->dev,
			"Total BW %u above switch core BW %u\n",
			total_bw, max_core_bw);
		return -EINVAL;
	}

	/* Halt the calendar while changing it */
	if (is_sparx5(sparx5))
		spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
			 QSYS_CAL_CTRL_CAL_MODE,
			 sparx5, QSYS_CAL_CTRL);

	/* Assign port bandwidth to auto calendar */
	for (idx = 0; idx < consts->n_auto_cals; idx++)
		spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));

	/* Increase grant rate of all ports to account for
	 * core clock ppm deviations
	 */
	spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
		 QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
		 sparx5, QSYS_CAL_CTRL);

	/* Grant idle usage to VD 0-2 */
	for (idx = 2; idx < 5; idx++)
		spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
			sparx5, HSCH_OUTB_SHARE_ENA(idx));

	/* Enable Auto mode */
	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
		 QSYS_CAL_CTRL_CAL_MODE,
		 sparx5, QSYS_CAL_CTRL);

	/* Verify successful calendar config */
	value = spx5_rd(sparx5, QSYS_CAL_CTRL);
	if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
		dev_err(sparx5->dev, "QSYS calendar error\n");
		err = -EINVAL;
	}
	return err;
}
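
/* Euclid's greatest common divisor, used when choosing a common slot
 * granularity for the DSM calendar.
 */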
static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
{
	if (b == 0)
		return a;
	return sparx5_dsm_exb_gcd(b, a % b);
}
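
/* Return the number of occupied (non-empty) slots in a DSM calendar */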
static u32 sparx5_dsm_cal_len(u32 *cal)
{
	u32 idx = 0, len = 0;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (cal[idx] != SPX5_DSM_CAL_EMPTY)
			len++;
		idx++;
	}
	return len;
}
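
/* Pop the first occupied slot from a calendar and return its value, or
 * SPX5_DSM_CAL_EMPTY if no occupied slot is left.
 */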
static u32 sparx5_dsm_cp_cal(u32 *sched)
{
	u32 idx = 0, tmp;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
			tmp = sched[idx];
			sched[idx] = SPX5_DSM_CAL_EMPTY;
			return tmp;
		}
		idx++;
	}
	return SPX5_DSM_CAL_EMPTY;
}
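
/* Calculate the DSM calendar for one taxi bus: convert the speed of each
 * device on the taxi into a number of slots, then spread the slots so that
 * the distance between two slots of the same device stays close to its
 * average distance (data->avg_dist).
 */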
int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
			     struct sparx5_calendar_data *data)
{
	bool slow_mode;
	u32 gcd, idx, sum, min, factor;
	u32 num_of_slots, slot_spd, empty_slots;
	u32 taxi_bw, clk_period_ps;

	clk_period_ps = sparx5_clk_period(sparx5->coreclock);
	taxi_bw = 128 * 1000000 / clk_period_ps;
	slow_mode = !!(clk_period_ps > 2000);
	memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
	       sizeof(data->taxi_ports));

	for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
		data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
		data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
		data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
	}
	/* Default empty calendar */
	data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	/* Map ports to taxi positions */
	for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
		u32 portno = data->taxi_ports[idx];

		if (portno < sparx5->data->consts->n_ports_all) {
			data->taxi_speeds[idx] = sparx5_cal_speed_to_value
				(sparx5_get_port_cal_speed(sparx5, portno));
		} else {
			data->taxi_speeds[idx] = 0;
		}
	}

	sum = 0;
	min = 25000;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 jdx;

		sum += data->taxi_speeds[idx];
		if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
			min = data->taxi_speeds[idx];
		gcd = min;
		for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
			gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
	}
	if (sum == 0) /* Empty calendar */
		return 0;
	/* Make room for overhead traffic */
	factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);

	if (sum * factor > (taxi_bw * 1000)) {
		dev_err(sparx5->dev,
			"Taxi %u, Requested BW %u above available BW %u\n",
			taxi, sum, taxi_bw);
		return -EINVAL;
	}
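	/* Pick a slot speed that results in at most 64 calendar slots */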
	for (idx = 0; idx < 4; idx++) {
		u32 raw_spd;

		if (idx == 0)
			raw_spd = gcd / 5;
		else if (idx == 1)
			raw_spd = gcd / 2;
		else if (idx == 2)
			raw_spd = gcd;
		else
			raw_spd = min;
		slot_spd = raw_spd * factor / 1000;
		num_of_slots = taxi_bw / slot_spd;
		if (num_of_slots <= 64)
			break;
	}

	num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
	slot_spd = taxi_bw / num_of_slots;

	sum = 0;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 spd = data->taxi_speeds[idx];
		u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;

		if (adjusted_speed > 0) {
			data->avg_dist[idx] = (128 * 1000000 * 10) /
				(adjusted_speed * clk_period_ps);
		} else {
			data->avg_dist[idx] = -1;
		}
		data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
		if (spd != 25000 && (spd != 10000 || !slow_mode)) {
			if (num_of_slots < (5 * data->dev_slots[idx])) {
				dev_err(sparx5->dev,
					"Taxi %u, speed %u, Low slot sep.\n",
					taxi, spd);
				return -EINVAL;
			}
		}
		sum += data->dev_slots[idx];
		if (sum > num_of_slots) {
			dev_err(sparx5->dev,
				"Taxi %u with overhead factor %u\n",
				taxi, factor);
			return -EINVAL;
		}
	}

	empty_slots = num_of_slots - sum;
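
	/* Fill the leading empty slots with the unused-device marker, then
	 * distribute each device's slots: merge the new slots into the
	 * existing schedule by draining the shorter and the longer list in
	 * the ratio given by tgt_score, so devices end up spread as evenly
	 * as possible.
	 */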
	for (idx = 0; idx < empty_slots; idx++)
		data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	for (idx = 1; idx < num_of_slots; idx++) {
		u32 indices_len = 0;
		u32 slot, jdx, kdx, ts;
		u32 cnt = 0;
		u32 num_of_old_slots, num_of_new_slots, tgt_score;

		for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
			if (data->dev_slots[slot] == idx) {
				data->indices[indices_len] = slot;
				indices_len++;
			}
		}
		if (indices_len == 0)
			continue;
		kdx = 0;
		for (slot = 0; slot < idx; slot++) {
			for (jdx = 0; jdx < indices_len; jdx++, kdx++)
				data->new_slots[kdx] = data->indices[jdx];
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}

		num_of_old_slots = slot;
		num_of_new_slots = kdx;
		cnt = 0;
		ts = 0;

		if (num_of_new_slots > num_of_old_slots) {
			memcpy(data->short_list, data->schedule,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->new_slots,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_old_slots /
				num_of_new_slots;
		} else {
			memcpy(data->short_list, data->new_slots,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->schedule,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_new_slots /
				num_of_old_slots;
		}

		while (sparx5_dsm_cal_len(data->short_list) > 0 ||
		       sparx5_dsm_cal_len(data->long_list) > 0) {
			u32 act = 0;

			if (sparx5_dsm_cal_len(data->short_list) > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->short_list);
				ts++;
				cnt += 100000;
				act = 1;
			}
			while (sparx5_dsm_cal_len(data->long_list) > 0 &&
			       cnt > tgt_score) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->long_list);
				ts++;
				cnt -= tgt_score;
				act = 1;
			}
			if (act == 0) {
				dev_err(sparx5->dev,
					"Error in DSM calendar calculation\n");
				return -EINVAL;
			}
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}
		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			data->schedule[slot] = data->temp_sched[slot];
			data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
			data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
		}
	}
	return 0;
}
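
/* Check the generated calendar: the accumulated deviation between actual and
 * average slot distance for each device must stay within the average distance
 * itself.
 */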
static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
				     struct sparx5_calendar_data *data)
{
	u32 num_of_slots, idx, port;
	int cnt, max_dist;
	u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
	u32 cal_length = sparx5_dsm_cal_len(data->schedule);

	for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
		num_of_slots = 0;
		max_dist = data->avg_dist[port];
		for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
			slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
			distances[idx] = SPX5_DSM_CAL_EMPTY;
		}

		for (idx = 0; idx < cal_length; idx++) {
			if (data->schedule[idx] == port) {
				slot_indices[num_of_slots] = idx;
				num_of_slots++;
			}
		}

		slot_indices[num_of_slots] = slot_indices[0] + cal_length;

		for (idx = 0; idx < num_of_slots; idx++) {
			distances[idx] = (slot_indices[idx + 1] -
					  slot_indices[idx]) * 10;
		}

		for (idx = 0; idx < num_of_slots; idx++) {
			u32 jdx, kdx;

			cnt = distances[idx] - max_dist;
			if (cnt < 0)
				cnt = -cnt;
			kdx = 0;
			for (jdx = (idx + 1) % num_of_slots;
			     jdx != idx;
			     jdx = (jdx + 1) % num_of_slots, kdx++) {
				cnt = cnt + distances[jdx] - max_dist;
				if (cnt < 0)
					cnt = -cnt;
				if (cnt > max_dist)
					goto check_err;
			}
		}
	}
	return 0;
check_err:
	dev_err(sparx5->dev,
		"Port %u: distance %u above limit %d\n",
		port, cnt, max_dist);
	return -EINVAL;
}
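
/* Program a calculated schedule into the DSM taxi calendar registers and
 * verify the resulting calendar length. When is_sparx5() is false the
 * currently inactive calendar is selected for programming and switched in
 * afterwards.
 */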
static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
				      struct sparx5_calendar_data *data)
{
	u32 cal_len = sparx5_dsm_cal_len(data->schedule), len, idx;

	if (!is_sparx5(sparx5)) {
		u32 val, act;

		val = spx5_rd(sparx5, DSM_TAXI_CAL_CFG(taxi));
		act = DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(val);

		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(!act),
			 DSM_TAXI_CAL_CFG_CAL_PGM_SEL,
			 sparx5, DSM_TAXI_CAL_CFG(taxi));
	}

	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
		 DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
		 sparx5,
		 DSM_TAXI_CAL_CFG(taxi));
	for (idx = 0; idx < cal_len; idx++) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
			 DSM_TAXI_CAL_CFG_CAL_IDX,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
			 DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
	}
	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
		 DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
		 sparx5,
		 DSM_TAXI_CAL_CFG(taxi));
	len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
						       DSM_TAXI_CAL_CFG(taxi)));
	if (len != cal_len - 1)
		goto update_err;

	if (!is_sparx5(sparx5)) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(1),
			 DSM_TAXI_CAL_CFG_CAL_SWITCH,
			 sparx5, DSM_TAXI_CAL_CFG(taxi));
	}

	return 0;
update_err:
	dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
	return -EINVAL;
}
/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 taxi;
	struct sparx5_calendar_data *data;
	int err = 0;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (taxi = 0; taxi < sparx5->data->consts->n_dsm_cal_taxis; ++taxi) {
		err = ops->dsm_calendar_calc(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar calculation failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_check(sparx5, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar check failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_update(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar update failed\n");