// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/pkt_cls.h>

#include "sparx5_main.h"
#include "sparx5_qos.h"
/* Calculate new base_time based on cycle_time.
 *
 * The hardware requires a base_time that is always in the future.
 * We define threshold_time as current_time + (2 * cycle_time).
 * If base_time is below threshold_time this function recalculates it to be in
 * the interval:
 * threshold_time <= base_time < (threshold_time + cycle_time)
 *
 * A very simple algorithm could be like this:
 * new_base_time = org_base_time + N * cycle_time
 * using the lowest N so that (new_base_time >= threshold_time). Stepping one
 * cycle_time at a time can be slow for large N, so the implementation below
 * advances in power-of-two multiples of cycle_time instead.
 */
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
			  const ktime_t org_base_time, ktime_t *new_base_time)
{
	ktime_t current_time, threshold_time, new_time;
	struct timespec64 ts;
	u64 nr_of_cycles_p2;
	u64 nr_of_cycles;
	u64 diff_time;

	new_time = org_base_time;

	sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
	current_time = timespec64_to_ktime(ts);
	threshold_time = current_time + (2 * cycle_time);
	diff_time = threshold_time - new_time;
	nr_of_cycles = div_u64(diff_time, cycle_time);
	nr_of_cycles_p2 = 1; /* Use 2^0 as start value */

	if (new_time >= threshold_time) {
		*new_base_time = new_time;
		return;
	}

	/* Calculate the smallest power of 2 (nr_of_cycles_p2)
	 * that is larger than nr_of_cycles.
	 */
	while (nr_of_cycles_p2 < nr_of_cycles)
		nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */

	/* Add as big chunks (power of 2 * cycle_time)
	 * as possible for each power of 2
	 */
	while (nr_of_cycles_p2) {
		if (new_time < threshold_time) {
			new_time += cycle_time * nr_of_cycles_p2;
			while (new_time < threshold_time)
				new_time += cycle_time * nr_of_cycles_p2;
			new_time -= cycle_time * nr_of_cycles_p2;
		}
		nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
	}
	new_time += cycle_time;
	*new_base_time = new_time;
}
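/* Worked example for the recalculation above (illustrative values only, not
 * from hardware documentation): with cycle_time = 1 ms, an org_base_time
 * roughly 10 s in the past and threshold_time = now + 2 ms, about 10,000
 * cycles are missing. Rather than adding cycle_time 10,000 times, the loop
 * steps through chunk sizes 2^14, 2^13, ..., 2^0 cycles, keeping new_time
 * just below threshold_time at each scale, and the final "+ cycle_time"
 * places the result in [threshold_time, threshold_time + cycle_time).
 */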
/* Max rates for leak groups */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
	1048568,  /*  1.049 Gbps */
	2621420,  /*  2.621 Gbps */
	10485680, /* 10.486 Gbps */
	26214200  /* 26.214 Gbps */
};

u32 sparx5_get_hsch_max_group_rate(int grp)
{
	return spx5_hsch_max_group_rate[grp];
}
static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
	return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
}
static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
				    u32 leak_time)
{
	spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
		HSCH_HSCH_TIMER_CFG(layer, group));
}
static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
	return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
}
static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
	return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
}
static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 itr, next;

	itr = sparx5_lg_get_first(sparx5, layer, group);

	for (;;) {
		next = sparx5_lg_get_next(sparx5, layer, group, itr);
		if (itr == next)
			return itr;

		itr = next;
	}
}
static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)
{
	return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
}
static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
			       u32 idx)
{
	return idx == sparx5_lg_get_first(sparx5, layer, group);
}
static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
{
	return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
}
static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
{
	if (sparx5_lg_is_empty(sparx5, layer, group))
		return false;

	return sparx5_lg_get_first(sparx5, layer, group) ==
	       sparx5_lg_get_last(sparx5, layer, group);
}
static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
			     u32 leak_time)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
}
static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, 0);
}
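/* Summary of the leak group bookkeeping used by the helpers above and below
 * (describes the behaviour of this code, not additional hardware
 * documentation): each leak group is a linked list of scheduler elements
 * (SEs). The head index lives in HSCH_HSCH_LEAK_CFG, each SE points to the
 * next SE via HSCH_SE_CONNECT, and the last SE points to itself. A leak_time
 * of zero in HSCH_HSCH_TIMER_CFG marks the group as disabled/empty.
 */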
static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
					u32 idx, u32 *group)
{
	u32 itr, next;
	int i;

	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
		if (sparx5_lg_is_empty(sparx5, layer, i))
			continue;

		itr = sparx5_lg_get_first(sparx5, layer, i);

		for (;;) {
			next = sparx5_lg_get_next(sparx5, layer, i, itr);

			if (itr == idx) {
				*group = i;
				return 0; /* Found it */
			}
			if (itr == next)
				break; /* Was not found */

			itr = next;
		}
	}

	return -1;
}
static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
{
	struct sparx5_layer *l = &layers[layer];
	struct sparx5_lg *lg;
	u32 i;

	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
		lg = &l->leak_groups[i];
		if (rate <= lg->max_rate) {
			*group = i;
			return 0;
		}
	}

	return -1;
}
static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
				  u32 idx, u32 *prev, u32 *next, u32 *first)
{
	u32 itr;

	*first = sparx5_lg_get_first(sparx5, layer, group);
	*prev = *first;
	*next = *first;
	itr = *first;

	for (;;) {
		*next = sparx5_lg_get_next(sparx5, layer, group, itr);

		if (itr == idx)
			return 0; /* Found it */

		if (itr == *next)
			return -1; /* Was not found */

		*prev = itr;
		itr = *next;
	}
}
static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 se_first, u32 idx, u32 idx_next, bool empty)
{
	u32 leak_time = layers[layer].leak_groups[group].leak_time;

	/* Stop leaking */
	sparx5_lg_disable(sparx5, layer, group);

	if (empty)
		return 0;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Link elements */
	spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
		HSCH_SE_CONNECT(idx));

	/* Set the first element. */
	spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
		 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
		 HSCH_HSCH_LEAK_CFG(layer, group));

	/* Start leaking */
	sparx5_lg_enable(sparx5, layer, group, leak_time);

	return 0;
}
static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
{
	u32 first, next, prev;
	bool empty = false;

	/* idx *must* be present in the leak group */
	WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
				       &first) < 0);

	if (sparx5_lg_is_singular(sparx5, layer, group)) {
		empty = true;
	} else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
		/* idx is removed, prev is now last */
		idx = prev;
		next = prev;
	} else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
		/* idx is removed and points to itself, first is next */
		first = next;
		next = idx;
	} else {
		/* Next is not touched */
		idx = prev;
	}

	return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
				  empty);
}
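/* Illustrative walk-through of the deletion cases above, for a hypothetical
 * leak group holding SEs A -> B -> C (C points to itself as end of list):
 * - delete C (last):   B is relinked to point to itself and becomes last
 * - delete A (first):  the group head moves to B, A points to itself
 * - delete B (middle): A is relinked to point to C
 * - single-SE group:   the group is simply disabled (empty = true)
 */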
static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
			 u32 idx)
{
	u32 first, next, old_group;

	pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
		 idx);

	/* Is this SE already shaping ? */
	if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
		if (old_group != new_group) {
			/* Delete from old group */
			sparx5_lg_del(sparx5, layer, old_group, idx);
		} else {
			/* Nothing to do here */
			return 0;
		}
	}

	/* We always add to head of the list */
	first = idx;

	if (sparx5_lg_is_empty(sparx5, layer, new_group))
		next = idx;
	else
		next = sparx5_lg_get_first(sparx5, layer, new_group);

	return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
				  false);
}
static int sparx5_shaper_conf_set(struct sparx5_port *port,
				  const struct sparx5_shaper *sh, u32 layer,
				  u32 idx, u32 group)
{
	int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
	struct sparx5 *sparx5 = port->sparx5;

	if (!sh->rate && !sh->burst)
		sparx5_lg_action = &sparx5_lg_del;
	else
		sparx5_lg_action = &sparx5_lg_add;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Set frame mode */
	spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
		 sparx5, HSCH_SE_CFG(idx));

	/* Set committed rate and burst */
	spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
		HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
		sparx5, HSCH_CIR_CFG(idx));

	/* This has to be done after the shaper configuration has been set */
	sparx5_lg_action(sparx5, layer, group, idx);

	return 0;
}
static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
{
	return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
	       1;
}
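/* Worked example of the cost conversion above (illustrative, independent of
 * the actual SPX5_DWRR_COST_MAX value): the cost scales with
 * weight_min / weight, rounded in fixed point with 4 fractional bits.
 *
 *   weight == weight_min     -> cost = SPX5_DWRR_COST_MAX - 1 (highest cost)
 *   weight == 2 * weight_min -> cost ~= SPX5_DWRR_COST_MAX / 2 - 1
 *
 * i.e. a band with twice the weight gets roughly half the DWRR cost and is
 * therefore served about twice as often.
 */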
static int sparx5_dwrr_conf_set(struct sparx5_port *port,
				struct sparx5_dwrr *dwrr)
{
	u32 layer = is_sparx5(port->sparx5) ? 2 : 1;
	int i;

	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer) |
		 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
		 port->sparx5, HSCH_HSCH_CFG_CFG);

	/* Number of *lower* indexes that are arbitrated dwrr */
	spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
		 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
		 HSCH_SE_CFG(port->portno));

	for (i = 0; i < dwrr->count; i++) {
		spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
			 HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
			 HSCH_DWRR_ENTRY(i));
	}

	return 0;
}
static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	struct sparx5_layer *layer;
	u32 sys_clk_per_100ps;
	struct sparx5_lg *lg;
	u32 leak_time_us;
	int i, ii;

	sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);

	for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
		layer = &layers[i];
		for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
			lg = &layer->leak_groups[ii];
			lg->max_rate = ops->get_hsch_max_group_rate(ii);

			/* Calculate the leak time in us, to serve a maximum
			 * rate of 'max_rate' for this group
			 */
			leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;

			/* Hardware wants leak time in ns */
			lg->leak_time = 1000 * leak_time_us;

			/* Calculate resolution */
			lg->resolution = 1000 / leak_time_us;

			/* Maximum number of shapers that can be served by
			 * this leak group
			 */
			lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;

			/* Example:
			 * Wanted bandwidth is 100Mbit:
			 *
			 * 100 mbps can be served by leak group zero.
			 *
			 * leak_time is 125000 ns.
			 * resolution is 8
			 *
			 * cir          = 100000 / 8 = 12500
			 * leaks_pr_sec = 10^9 / 125000 = 8000
			 * bw           = 12500 * 8000 = 10^8 (100 Mbit)
			 */

			/* Disable by default - this also indicates an empty
			 * leak group
			 */
			sparx5_lg_disable(sparx5, i, ii);
		}
	}

	return 0;
}
int sparx5_qos_init(struct sparx5 *sparx5)
{
	int ret;

	ret = sparx5_leak_groups_init(sparx5);
	if (ret < 0)
		return ret;

	ret = sparx5_dcb_init(sparx5);
	if (ret < 0)
		return ret;

	sparx5_psfp_init(sparx5);

	return 0;
}
int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
{
	int i;

	if (num_tc != SPX5_PRIOS) {
		netdev_err(ndev, "Only %d traffic classes supported\n",
			   SPX5_PRIOS);
		return -EINVAL;
	}

	netdev_set_num_tc(ndev, num_tc);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(ndev, i, 1, i);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}
int sparx5_tc_mqprio_del(struct net_device *ndev)
{
	netdev_reset_tc(ndev);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}
int sparx5_tc_tbf_add(struct sparx5_port *port,
		      struct tc_tbf_qopt_offload_replace_params *params,
		      u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {
		.mode = SPX5_SE_MODE_DATARATE,
		.rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
		.burst = params->max_size,
	};
	struct sparx5_lg *lg;
	u32 group;

	/* Find suitable group for this se */
	if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
		pr_debug("Could not find leak group for se with rate: %d",
			 sh.rate);
		return -EINVAL;
	}

	lg = &layers[layer].leak_groups[group];

	pr_debug("Found matching group (speed: %d)\n", lg->max_rate);

	if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
		return -EINVAL;

	/* Calculate committed rate and burst */
	sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
	sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);

	if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
		return -EINVAL;

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
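/* Illustrative numbers for the conversions above (example values, not from
 * hardware documentation): a tc-tbf rate of 12,500,000 bytes/s becomes
 * 12,500,000 / 1000 * 8 = 100,000 kbit/s. If that rate lands in a leak group
 * with resolution 8, the committed rate programmed to hardware is
 * DIV_ROUND_UP(100000, 8) = 12500, and the burst is likewise rounded up to
 * whole SPX5_SE_BURST_UNIT units.
 */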
int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {0};
	u32 group;

	sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
int sparx5_tc_ets_add(struct sparx5_port *port,
		      struct tc_ets_qopt_offload_replace_params *params)
{
	struct sparx5_dwrr dwrr = {0};
	/* Minimum weight for each iteration */
	unsigned int w_min = 100;
	int i;

	/* Find minimum weight for all dwrr bands */
	for (i = 0; i < SPX5_PRIOS; i++) {
		if (params->quanta[i] == 0)
			continue;
		w_min = min(w_min, params->weights[i]);
	}

	for (i = 0; i < SPX5_PRIOS; i++) {
		/* Strict band; skip */
		if (params->quanta[i] == 0)
			continue;

		dwrr.count++;

		/* On the sparx5, bands with higher indexes are preferred and
		 * arbitrated strict. Strict bands are put in the lower indexes
		 * by tc, so we reverse the bands here.
		 *
		 * Also convert the weight to something the hardware
		 * understands.
		 */
		dwrr.cost[SPX5_PRIOS - i - 1] =
			sparx5_weight_to_hw_cost(w_min, params->weights[i]);
	}

	return sparx5_dwrr_conf_set(port, &dwrr);
}
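/* Illustrative mapping for the band reversal above, assuming SPX5_PRIOS == 8
 * and a tc-ets setup with two strict bands (0-1) and six dwrr bands (2-7):
 * tc band 2 is programmed at hardware index 5, band 3 at index 4, ... and
 * band 7 at index 0, so the dwrr bands end up in the lower (dwrr-arbitrated)
 * hardware indexes while the strict bands keep the higher-priority indexes.
 */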
int sparx5_tc_ets_del(struct sparx5_port *port)
{
	struct sparx5_dwrr dwrr = {0};

	return sparx5_dwrr_conf_set(port, &dwrr);
}