drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/module.h>
#include <linux/device.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* QSYS calendar information */
#define SPX5_PORTS_PER_CALREG	10 /* Ports mapped in a calendar register */
#define SPX5_CALBITS_PER_PORT	3  /* Bit per port in calendar register */

/* DSM calendar information */
#define SPX5_DSM_CAL_TAXIS	8
#define SPX5_DSM_CAL_BW_LOSS	553

#define SPX5_TAXI_PORT_MAX	70

#define SPEED_12500		12500

/* Maps from taxis to port numbers */
static u32 sparx5_taxi_ports[SPX5_DSM_CAL_TAXIS][SPX5_DSM_CAL_MAX_DEVS_PER_TAXI] = {
	{57, 12, 0, 1, 2, 16, 17, 18, 19, 20, 21, 22, 23},
	{58, 13, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31},
	{59, 14, 6, 7, 8, 32, 33, 34, 35, 36, 37, 38, 39},
	{60, 15, 9, 10, 11, 40, 41, 42, 43, 44, 45, 46, 47},
	{61, 48, 49, 50, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{62, 51, 52, 53, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{56, 63, 54, 55, 99, 99, 99, 99, 99, 99, 99, 99, 99},
	{64, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99},
};

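/* A value of 99 in the map above marks an unused device position on a taxi:
 * it does not correspond to a real port, so it is given a bandwidth of zero
 * in the DSM calendar calculation below.
 */

/* Maximum usable front-port bandwidth, in Mbps, for the given switch target
 * (part number); used as an upper bound when building the QSYS calendar.
 */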
static u32 sparx5_target_bandwidth(struct sparx5 *sparx5)
{
	switch (sparx5->target_ct) {
	case SPX5_TARGET_CT_7546:
	case SPX5_TARGET_CT_7546TSN:
		return 65000;
	case SPX5_TARGET_CT_7549:
	case SPX5_TARGET_CT_7549TSN:
		return 91000;
	case SPX5_TARGET_CT_7552:
	case SPX5_TARGET_CT_7552TSN:
		return 129000;
	case SPX5_TARGET_CT_7556:
	case SPX5_TARGET_CT_7556TSN:
		return 161000;
	case SPX5_TARGET_CT_7558:
	case SPX5_TARGET_CT_7558TSN:
		return 201000;
	case SPX5_TARGET_CT_LAN9691VAO:
		return 46000;
	case SPX5_TARGET_CT_LAN9694RED:
	case SPX5_TARGET_CT_LAN9694TSN:
	case SPX5_TARGET_CT_LAN9694:
		return 68000;
	case SPX5_TARGET_CT_LAN9696RED:
	case SPX5_TARGET_CT_LAN9696TSN:
	case SPX5_TARGET_CT_LAN9692VAO:
	case SPX5_TARGET_CT_LAN9696:
		return 88000;
	case SPX5_TARGET_CT_LAN9698RED:
	case SPX5_TARGET_CT_LAN9698TSN:
	case SPX5_TARGET_CT_LAN9693VAO:
	case SPX5_TARGET_CT_LAN9698:
		return 101000;
	default:
		return 0;
	}
}

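/* Switch core bandwidth, in Mbps, available at a given core clock frequency
 * (roughly the clock frequency in kHz divided by three, as noted per case).
 */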
static u32 sparx5_clk_to_bandwidth(enum sparx5_core_clockfreq cclock)
{
	switch (cclock) {
	case SPX5_CORE_CLOCK_250MHZ: return 83000;  /* 250000 / 3 */
	case SPX5_CORE_CLOCK_328MHZ: return 109375; /* 328000 / 3 */
	case SPX5_CORE_CLOCK_500MHZ: return 166000; /* 500000 / 3 */
	case SPX5_CORE_CLOCK_625MHZ: return 208000; /* 625000 / 3 */
	default: return 0;
	}
	return 0;
}

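/* Translate a calendar speed enum into a bandwidth value in Mbps */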
u32 sparx5_cal_speed_to_value(enum sparx5_cal_bw speed)
{
	switch (speed) {
	case SPX5_CAL_SPEED_1G:   return 1000;
	case SPX5_CAL_SPEED_2G5:  return 2500;
	case SPX5_CAL_SPEED_5G:   return 5000;
	case SPX5_CAL_SPEED_10G:  return 10000;
	case SPX5_CAL_SPEED_25G:  return 25000;
	case SPX5_CAL_SPEED_0G5:  return 500;
	case SPX5_CAL_SPEED_12G5: return 12500;
	default: return 0;
	}
}
EXPORT_SYMBOL_GPL(sparx5_cal_speed_to_value);

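/* Map an ethtool SPEED_* value to the calendar slot speed it requires.
 * 10/100 Mbit ports still occupy a 0.5G slot, and ports with an unknown
 * speed are given a 1G slot.
 */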
static u32 sparx5_bandwidth_to_calendar(u32 bw)
{
	switch (bw) {
	case SPEED_10:      return SPX5_CAL_SPEED_0G5;
	case SPEED_100:     return SPX5_CAL_SPEED_0G5;
	case SPEED_1000:    return SPX5_CAL_SPEED_1G;
	case SPEED_2500:    return SPX5_CAL_SPEED_2G5;
	case SPEED_5000:    return SPX5_CAL_SPEED_5G;
	case SPEED_10000:   return SPX5_CAL_SPEED_10G;
	case SPEED_12500:   return SPX5_CAL_SPEED_12G5;
	case SPEED_25000:   return SPX5_CAL_SPEED_25G;
	case SPEED_UNKNOWN: return SPX5_CAL_SPEED_1G;
	default:            return SPX5_CAL_SPEED_NONE;
	}
}

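/* Calendar bandwidth required by a port: front ports use their configured
 * bandwidth, the two CPU ports get a 2.5G slot each, and the internal
 * virtual devices (VD0-VD2) only receive idle bandwidth.
 */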
enum sparx5_cal_bw sparx5_get_port_cal_speed(struct sparx5 *sparx5, u32 portno)
{
	struct sparx5_port *port;

	if (portno >= sparx5->data->consts->n_ports) {
		/* Internal ports */
		if (portno ==
			    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0) ||
		    portno ==
			    sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1)) {
			/* Equals 1.25G */
			return SPX5_CAL_SPEED_2G5;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD0)) {
			/* IPMC only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD1)) {
			/* OAM only idle BW */
			return SPX5_CAL_SPEED_NONE;
		} else if (portno ==
			   sparx5_get_internal_port(sparx5, SPX5_PORT_VD2)) {
			/* IPinIP gets only idle BW */
			return SPX5_CAL_SPEED_NONE;
		}
		/* not in port map */
		return SPX5_CAL_SPEED_NONE;
	}
	/* Front ports - may be used */
	port = sparx5->ports[portno];
	if (!port)
		return SPX5_CAL_SPEED_NONE;
	return sparx5_bandwidth_to_calendar(port->conf.bandwidth);
}
EXPORT_SYMBOL_GPL(sparx5_get_port_cal_speed);

/* Auto configure the QSYS calendar based on port configuration */
int sparx5_config_auto_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	u32 cal[7], value, idx, portno;
	u32 max_core_bw;
	u32 total_bw = 0, used_port_bw = 0;
	int err = 0;
	enum sparx5_cal_bw spd;

	memset(cal, 0, sizeof(cal));

	max_core_bw = sparx5_clk_to_bandwidth(sparx5->coreclock);
	if (max_core_bw == 0) {
		dev_err(sparx5->dev, "Core clock not supported");
		return -EINVAL;
	}

	/* Setup the calendar with the bandwidth to each port */
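	/* Each QSYS_CAL_AUTO register packs SPX5_PORTS_PER_CALREG ports,
	 * using SPX5_CALBITS_PER_PORT bits per port, so a port's speed goes
	 * into cal[portno / 10] at bit offset (portno % 10) * 3.
	 */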
	for (portno = 0; portno < consts->n_ports_all; portno++) {
		u64 reg, offset, this_bw;

		spd = sparx5_get_port_cal_speed(sparx5, portno);
		if (spd == SPX5_CAL_SPEED_NONE)
			continue;

		this_bw = sparx5_cal_speed_to_value(spd);
		if (portno < consts->n_ports)
			used_port_bw += this_bw;
		else
			/* Internal ports are granted half the value */
			this_bw = this_bw / 2;
		total_bw += this_bw;
		reg = portno;
		offset = do_div(reg, SPX5_PORTS_PER_CALREG);
		cal[reg] |= spd << (offset * SPX5_CALBITS_PER_PORT);
	}

	if (used_port_bw > sparx5_target_bandwidth(sparx5)) {
		dev_err(sparx5->dev,
			"Port BW %u above target BW %u\n",
			used_port_bw, sparx5_target_bandwidth(sparx5));
		return -EINVAL;
	}

	if (total_bw > max_core_bw) {
		dev_err(sparx5->dev,
			"Total BW %u above switch core BW %u\n",
			total_bw, max_core_bw);
		return -EINVAL;
	}

	/* Halt the calendar while changing it */
	if (is_sparx5(sparx5))
		spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(10),
			 QSYS_CAL_CTRL_CAL_MODE,
			 sparx5, QSYS_CAL_CTRL);

	/* Assign port bandwidth to auto calendar */
	for (idx = 0; idx < consts->n_auto_cals; idx++)
		spx5_wr(cal[idx], sparx5, QSYS_CAL_AUTO(idx));

	/* Increase grant rate of all ports to account for
	 * core clock ppm deviations
	 */
	spx5_rmw(QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE_SET(671), /* 672->671 */
		 QSYS_CAL_CTRL_CAL_AUTO_GRANT_RATE,
		 sparx5,
		 QSYS_CAL_CTRL);

	/* Grant idle usage to VD 0-2 */
	for (idx = 2; idx < 5; idx++)
		spx5_wr(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(12),
			sparx5,
			HSCH_OUTB_SHARE_ENA(idx));

	/* Enable Auto mode */
	spx5_rmw(QSYS_CAL_CTRL_CAL_MODE_SET(8),
		 QSYS_CAL_CTRL_CAL_MODE,
		 sparx5, QSYS_CAL_CTRL);

	/* Verify successful calendar config */
	value = spx5_rd(sparx5, QSYS_CAL_CTRL);
	if (QSYS_CAL_CTRL_CAL_AUTO_ERROR_GET(value)) {
		dev_err(sparx5->dev, "QSYS calendar error\n");
		err = -EINVAL;
	}
	return err;
}

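/* Greatest common divisor (Euclid's algorithm), used to find a common rate
 * granularity across the device speeds on a taxi bus.
 */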
static u32 sparx5_dsm_exb_gcd(u32 a, u32 b)
{
	if (b == 0)
		return a;
	return sparx5_dsm_exb_gcd(b, a % b);
}

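/* Number of occupied (non-empty) slots in a DSM calendar */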
static u32 sparx5_dsm_cal_len(u32 *cal)
{
	u32 idx = 0, len = 0;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (cal[idx] != SPX5_DSM_CAL_EMPTY)
			len++;
		idx++;
	}
	return len;
}

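/* Pop the first occupied slot from a calendar: return its value and mark
 * the slot as empty.
 */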
static u32 sparx5_dsm_cp_cal(u32 *sched)
{
	u32 idx = 0, tmp;

	while (idx < SPX5_DSM_CAL_LEN) {
		if (sched[idx] != SPX5_DSM_CAL_EMPTY) {
			tmp = sched[idx];
			sched[idx] = SPX5_DSM_CAL_EMPTY;
			return tmp;
		}
		idx++;
	}
	return SPX5_DSM_CAL_EMPTY;
}

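/* Calculate the DSM calendar for one taxi bus.
 *
 * In outline: map the taxi's devices to port speeds, scale the speeds by an
 * overhead factor derived from SPX5_DSM_CAL_BW_LOSS, pick a slot granularity
 * from the GCD of the active speeds (at most 64 slots), give each device the
 * number of slots its bandwidth requires, and interleave the slots so that
 * each device's slots are spread as evenly as possible over the calendar.
 */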
int sparx5_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
			     struct sparx5_calendar_data *data)
{
	bool slow_mode;
	u32 gcd, idx, sum, min, factor;
	u32 num_of_slots, slot_spd, empty_slots;
	u32 taxi_bw, clk_period_ps;

	clk_period_ps = sparx5_clk_period(sparx5->coreclock);
	taxi_bw = 128 * 1000000 / clk_period_ps;
	slow_mode = !!(clk_period_ps > 2000);
	memcpy(data->taxi_ports, &sparx5_taxi_ports[taxi],
	       sizeof(data->taxi_ports));

	for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
		data->new_slots[idx] = SPX5_DSM_CAL_EMPTY;
		data->schedule[idx] = SPX5_DSM_CAL_EMPTY;
		data->temp_sched[idx] = SPX5_DSM_CAL_EMPTY;
	}
	/* Default empty calendar */
	data->schedule[0] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

	/* Map ports to taxi positions */
	for (idx = 0; idx < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; idx++) {
		u32 portno = data->taxi_ports[idx];

		if (portno < sparx5->data->consts->n_ports_all) {
			data->taxi_speeds[idx] = sparx5_cal_speed_to_value
				(sparx5_get_port_cal_speed(sparx5, portno));
		} else {
			data->taxi_speeds[idx] = 0;
		}
	}

	sum = 0;
	min = 25000;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 jdx;

		sum += data->taxi_speeds[idx];
		if (data->taxi_speeds[idx] && data->taxi_speeds[idx] < min)
			min = data->taxi_speeds[idx];
		gcd = min;
		for (jdx = 0; jdx < ARRAY_SIZE(data->taxi_speeds); jdx++)
			gcd = sparx5_dsm_exb_gcd(gcd, data->taxi_speeds[jdx]);
	}
	if (sum == 0) /* Empty calendar */
		return 0;
	/* Make room for overhead traffic */
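	/* factor is a fixed-point scale (x1000) of roughly 1.0585, i.e.
	 * 1 / (1 - 5.53%) with SPX5_DSM_CAL_BW_LOSS given in 0.01% units;
	 * bandwidths scaled by factor are therefore compared against
	 * taxi_bw * 1000 below.
	 */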
	factor = 100 * 100 * 1000 / (100 * 100 - SPX5_DSM_CAL_BW_LOSS);

	if (sum * factor > (taxi_bw * 1000)) {
		dev_err(sparx5->dev,
			"Taxi %u, Requested BW %u above available BW %u\n",
			taxi, sum, taxi_bw);
		return -EINVAL;
	}
	for (idx = 0; idx < 4; idx++) {
		u32 raw_spd;

		if (idx == 0)
			raw_spd = gcd / 5;
		else if (idx == 1)
			raw_spd = gcd / 2;
		else if (idx == 2)
			raw_spd = gcd;
		else
			raw_spd = min;
		slot_spd = raw_spd * factor / 1000;
		num_of_slots = taxi_bw / slot_spd;
		if (num_of_slots <= 64)
			break;
	}

	num_of_slots = num_of_slots > 64 ? 64 : num_of_slots;
	slot_spd = taxi_bw / num_of_slots;

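	/* For each device: dev_slots[] is the number of calendar slots its
	 * (overhead-adjusted) bandwidth needs, and avg_dist[] is the ideal
	 * spacing between two of its slots, in tenths of a slot, used later
	 * by sparx5_dsm_calendar_check().
	 */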
	sum = 0;
	for (idx = 0; idx < ARRAY_SIZE(data->taxi_speeds); idx++) {
		u32 spd = data->taxi_speeds[idx];
		u32 adjusted_speed = data->taxi_speeds[idx] * factor / 1000;

		if (adjusted_speed > 0) {
			data->avg_dist[idx] = (128 * 1000000 * 10) /
				(adjusted_speed * clk_period_ps);
		} else {
			data->avg_dist[idx] = -1;
		}
		data->dev_slots[idx] = ((spd * factor / slot_spd) + 999) / 1000;
		if (spd != 25000 && (spd != 10000 || !slow_mode)) {
			if (num_of_slots < (5 * data->dev_slots[idx])) {
				dev_err(sparx5->dev,
					"Taxi %u, speed %u, Low slot sep.\n",
					taxi, spd);
				return -EINVAL;
			}
		}
		sum += data->dev_slots[idx];
		if (sum > num_of_slots) {
			dev_err(sparx5->dev,
				"Taxi %u with overhead factor %u\n",
				taxi, factor);
			return -EINVAL;
		}
	}

	empty_slots = num_of_slots - sum;

	for (idx = 0; idx < empty_slots; idx++)
		data->schedule[idx] = SPX5_DSM_CAL_MAX_DEVS_PER_TAXI;

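	/* Merge the per-device slots into the schedule. Devices are handled
	 * in order of increasing slot count; for each group, the new slots
	 * are interleaved with the existing schedule (short list vs. long
	 * list) so that every device ends up roughly evenly spaced.
	 */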
	for (idx = 1; idx < num_of_slots; idx++) {
		u32 indices_len = 0;
		u32 slot, jdx, kdx, ts;
		s32 cnt;
		u32 num_of_old_slots, num_of_new_slots, tgt_score;

		for (slot = 0; slot < ARRAY_SIZE(data->dev_slots); slot++) {
			if (data->dev_slots[slot] == idx) {
				data->indices[indices_len] = slot;
				indices_len++;
			}
		}
		if (indices_len == 0)
			continue;
		kdx = 0;
		for (slot = 0; slot < idx; slot++) {
			for (jdx = 0; jdx < indices_len; jdx++, kdx++)
				data->new_slots[kdx] = data->indices[jdx];
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->schedule[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}

		num_of_old_slots = slot;
		num_of_new_slots = kdx;
		cnt = 0;
		ts = 0;

		if (num_of_new_slots > num_of_old_slots) {
			memcpy(data->short_list, data->schedule,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->new_slots,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_old_slots /
				num_of_new_slots;
		} else {
			memcpy(data->short_list, data->new_slots,
			       sizeof(data->short_list));
			memcpy(data->long_list, data->schedule,
			       sizeof(data->long_list));
			tgt_score = 100000 * num_of_new_slots /
				num_of_old_slots;
		}

		while (sparx5_dsm_cal_len(data->short_list) > 0 ||
		       sparx5_dsm_cal_len(data->long_list) > 0) {
			u32 act = 0;

			if (sparx5_dsm_cal_len(data->short_list) > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->short_list);
				ts++;
				cnt += 100000;
				act = 1;
			}
			while (sparx5_dsm_cal_len(data->long_list) > 0 &&
			       cnt > 0) {
				data->temp_sched[ts] =
					sparx5_dsm_cp_cal(data->long_list);
				ts++;
				cnt -= tgt_score;
				act = 1;
			}
			if (act == 0) {
				dev_err(sparx5->dev,
					"Error in DSM calendar calculation\n");
				return -EINVAL;
			}
		}

		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			if (data->temp_sched[slot] == SPX5_DSM_CAL_EMPTY)
				break;
		}
		for (slot = 0; slot < SPX5_DSM_CAL_LEN; slot++) {
			data->schedule[slot] = data->temp_sched[slot];
			data->temp_sched[slot] = SPX5_DSM_CAL_EMPTY;
			data->new_slots[slot] = SPX5_DSM_CAL_EMPTY;
		}
	}
	return 0;
}

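/* Verify the calculated schedule: for every device on the taxi, walk the
 * calendar and check that the accumulated deviation from the device's ideal
 * slot spacing (avg_dist) never exceeds one ideal spacing.
 */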
static int sparx5_dsm_calendar_check(struct sparx5 *sparx5,
				     struct sparx5_calendar_data *data)
{
	u32 num_of_slots, idx, port;
	int cnt, max_dist;
	u32 slot_indices[SPX5_DSM_CAL_LEN], distances[SPX5_DSM_CAL_LEN];
	u32 cal_length = sparx5_dsm_cal_len(data->schedule);

	for (port = 0; port < SPX5_DSM_CAL_MAX_DEVS_PER_TAXI; port++) {
		num_of_slots = 0;
		max_dist = data->avg_dist[port];
		for (idx = 0; idx < SPX5_DSM_CAL_LEN; idx++) {
			slot_indices[idx] = SPX5_DSM_CAL_EMPTY;
			distances[idx] = SPX5_DSM_CAL_EMPTY;
		}

		for (idx = 0; idx < cal_length; idx++) {
			if (data->schedule[idx] == port) {
				slot_indices[num_of_slots] = idx;
				num_of_slots++;
			}
		}

		slot_indices[num_of_slots] = slot_indices[0] + cal_length;

		for (idx = 0; idx < num_of_slots; idx++) {
			distances[idx] = (slot_indices[idx + 1] -
				slot_indices[idx]) * 10;
		}

		for (idx = 0; idx < num_of_slots; idx++) {
			u32 jdx, kdx;

			cnt = distances[idx] - max_dist;
			if (cnt < 0)
				cnt = -cnt;
			kdx = 0;
			for (jdx = (idx + 1) % num_of_slots;
			     jdx != idx;
			     jdx = (jdx + 1) % num_of_slots, kdx++) {
				cnt = cnt + distances[jdx] - max_dist;
				if (cnt < 0)
					cnt = -cnt;
				if (cnt > max_dist)
					goto check_err;
			}
		}
	}
	return 0;
check_err:
	dev_err(sparx5->dev,
		"Port %u: distance %u above limit %d\n",
		port, cnt, max_dist);
	return -EINVAL;
}

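/* Program the calculated schedule into the DSM taxi calendar registers.
 * Targets with dual calendar banks (!is_sparx5()) program the currently
 * inactive bank and switch over to it once the new length is verified.
 */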
static int sparx5_dsm_calendar_update(struct sparx5 *sparx5, u32 taxi,
				      struct sparx5_calendar_data *data)
{
	u32 cal_len = sparx5_dsm_cal_len(data->schedule), len, idx;

	if (!is_sparx5(sparx5)) {
		u32 val, act;

		val = spx5_rd(sparx5, DSM_TAXI_CAL_CFG(taxi));
		act = DSM_TAXI_CAL_CFG_CAL_SEL_STAT_GET(val);

		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_SEL_SET(!act),
			 DSM_TAXI_CAL_CFG_CAL_PGM_SEL,
			 sparx5, DSM_TAXI_CAL_CFG(taxi));
	}

	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(1),
		 DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
		 sparx5,
		 DSM_TAXI_CAL_CFG(taxi));
	for (idx = 0; idx < cal_len; idx++) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_IDX_SET(idx),
			 DSM_TAXI_CAL_CFG_CAL_IDX,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_VAL_SET(data->schedule[idx]),
			 DSM_TAXI_CAL_CFG_CAL_PGM_VAL,
			 sparx5,
			 DSM_TAXI_CAL_CFG(taxi));
	}
	spx5_rmw(DSM_TAXI_CAL_CFG_CAL_PGM_ENA_SET(0),
		 DSM_TAXI_CAL_CFG_CAL_PGM_ENA,
		 sparx5,
		 DSM_TAXI_CAL_CFG(taxi));
	len = DSM_TAXI_CAL_CFG_CAL_CUR_LEN_GET(spx5_rd(sparx5,
						       DSM_TAXI_CAL_CFG(taxi)));
	if (len != cal_len - 1)
		goto update_err;

	if (!is_sparx5(sparx5)) {
		spx5_rmw(DSM_TAXI_CAL_CFG_CAL_SWITCH_SET(1),
			 DSM_TAXI_CAL_CFG_CAL_SWITCH,
			 sparx5, DSM_TAXI_CAL_CFG(taxi));
	}

	return 0;
update_err:
	dev_err(sparx5->dev, "Incorrect calendar length: %u\n", len);
	return -EINVAL;
}

/* Configure the DSM calendar based on port configuration */
int sparx5_config_dsm_calendar(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	int taxi;
	struct sparx5_calendar_data *data;
	int err = 0;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (taxi = 0; taxi < sparx5->data->consts->n_dsm_cal_taxis; ++taxi) {
		err = ops->dsm_calendar_calc(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar calculation failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_check(sparx5, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar check failed\n");
			goto cal_out;
		}
		err = sparx5_dsm_calendar_update(sparx5, taxi, data);
		if (err) {
			dev_err(sparx5->dev, "DSM calendar update failed\n");
			goto cal_out;
		}
	}
cal_out:
	kfree(data);
	return err;
}