// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet QoS submodule
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 * quality of service module includes:
 * Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
 * Interspersed Express Traffic (IET - P802.3br/D2.0)
 */
#include <linux/pm_runtime.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/units.h>
#include <net/pkt_cls.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
#include "cpsw_ale.h"
#define TO_MBPS(x)	DIV_ROUND_UP((x), BYTES_PER_MBIT)
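/* Example: a tc min_rate of 12,500,000 bytes/s converts as
 * TO_MBPS(12500000) = DIV_ROUND_UP(12500000, 125000) = 100 Mbit/s,
 * since BYTES_PER_MBIT is 125,000 bytes.
 */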
enum timer_act {
	TACT_PROG,		/* need to program the timer */
	TACT_NEED_STOP,		/* need to stop it first */
	TACT_SKIP_PROG,		/* only the buffer can be updated */
};
static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port,
						 u8 preemptible_tcs);
static u32
am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
{
	u32 ir;

	bus_freq /= 1000000;
	ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
	return ir;
}
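/* Example: with a 250 MHz bus clock (bus_freq = 250 after the divide above)
 * and rate_mbps = 100, ir = DIV_ROUND_UP(100 * 32768, 250) = 13108.
 */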
static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
{
	int prio;

	for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
	}
}
static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
{
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct am65_cpsw_common *common = port->common;
	struct tc_mqprio_qopt_offload *mqprio;
	bool enable, shaper_susp = false;
	u32 rate_mbps;
	int tc, prio;

	mqprio = &p_mqprio->mqprio_hw;
	/* takes care of no link case as well */
	if (p_mqprio->max_rate_total > port->qos.link_speed)
		shaper_susp = true;

	am65_cpsw_tx_pn_shaper_reset(port);

	enable = p_mqprio->shaper_en && !shaper_susp;
	if (!enable)
		return;

	/* Rate limit is specified per Traffic Class but
	 * for CPSW, rate limit can be applied per priority
	 * at port FIFO.
	 *
	 * We have assigned the same priority (TCn) to all queues
	 * of a Traffic Class so they share the same shaper
	 * bandwidth.
	 */
	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
		prio = tc;

		rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
		rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
						       common->bus_freq);
		writel(rate_mbps,
		       port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));

		/* max_rate includes the committed rate, so EIR = max - min */
		if (mqprio->max_rate[tc]) {
			rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
			rate_mbps = TO_MBPS(rate_mbps);
			rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
							       common->bus_freq);
			writel(rate_mbps,
			       port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
		}
	}
}
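/* Example of the CIR/EIR split above: with min_rate[tc] = 12,500,000 B/s and
 * max_rate[tc] = 25,000,000 B/s, CIR is programmed from TO_MBPS(12500000) =
 * 100 Mbit/s and EIR from TO_MBPS(25000000 - 12500000) = 100 Mbit/s, i.e.
 * EIR carries only the excess above the committed rate.
 */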
static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
					  struct tc_mqprio_qopt_offload *mqprio)
{
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct netlink_ext_ack *extack = mqprio->extack;
	u64 min_rate_total = 0, max_rate_total = 0;
	u32 min_rate_msk = 0, max_rate_msk = 0;
	bool has_min_rate, has_max_rate;
	int num_tc, i;

	if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
		return 0;

	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
		return 0;

	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

	if (!has_min_rate && has_max_rate) {
		NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
		return -EOPNOTSUPP;
	}

	if (!has_min_rate)
		return 0;

	num_tc = mqprio->qopt.num_tc;

	for (i = num_tc - 1; i >= 0; i--) {
		u32 ch_msk;

		if (mqprio->min_rate[i])
			min_rate_msk |= BIT(i);
		min_rate_total += mqprio->min_rate[i];

		if (has_max_rate) {
			if (mqprio->max_rate[i])
				max_rate_msk |= BIT(i);
			max_rate_total += mqprio->max_rate[i];

			if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "TX tc%d rate max>0 but min=0",
						       i);
				return -EINVAL;
			}

			if (mqprio->max_rate[i] &&
			    mqprio->max_rate[i] < mqprio->min_rate[i]) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "TX tc%d rate min(%llu)>max(%llu)",
						       i, mqprio->min_rate[i],
						       mqprio->max_rate[i]);
				return -EINVAL;
			}
		}

		ch_msk = GENMASK(num_tc - 1, i);
		if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Min rate must be set sequentially hi->lo tx_rate_msk%x",
					       min_rate_msk);
			return -EINVAL;
		}

		if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Max rate must be set sequentially hi->lo tx_rate_msk%x",
					       max_rate_msk);
			return -EINVAL;
		}
	}

	min_rate_total = TO_MBPS(min_rate_total);
	max_rate_total = TO_MBPS(max_rate_total);

	p_mqprio->shaper_en = true;
	p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);

	return 0;
}
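/* Example of the sequential-rate rule enforced above: with num_tc = 3,
 * setting min_rate only on tc0 gives min_rate_msk = 0x1 while
 * ch_msk = GENMASK(2, 0) = 0x7, so min_rate_msk ^ ch_msk != 0 and the
 * configuration is rejected. Rates must be set from the highest TC down,
 * e.g. tc2 only (0x4), tc2+tc1 (0x6) or tc2+tc1+tc0 (0x7).
 */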
static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;

	p_mqprio->shaper_en = false;
	p_mqprio->max_rate_total = 0;

	am65_cpsw_tx_pn_shaper_reset(port);
	netdev_reset_tc(ndev);

	/* Reset all Queue priorities to 0 */
	writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

	am65_cpsw_iet_change_preemptible_tcs(port, 0);
}
static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct am65_cpsw_common *common = port->common;
	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
	int i, tc, offset, count, prio, ret;
	u8 num_tc = qopt->num_tc;
	u32 tx_prio_map = 0;

	memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	if (!num_tc) {
		am65_cpsw_reset_tc_mqprio(ndev);
		ret = 0;
		goto exit_put;
	}

	ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
	if (ret)
		goto exit_put;

	netdev_set_num_tc(ndev, num_tc);

	/* Multiple Linux priorities can map to a Traffic Class.
	 * A Traffic Class can have multiple contiguous Queues,
	 * Queues get mapped to Channels (thread_id);
	 *	if not VLAN tagged, thread_id is used as packet_priority,
	 *	if VLAN tagged, VLAN priority is used as packet_priority.
	 * packet_priority gets mapped to header_priority in p0_rx_pri_map,
	 * header_priority gets mapped to switch_priority in pn_tx_pri_map.
	 * As p0_rx_pri_map is left at defaults (0x76543210), we can
	 * assume that Queue_n gets mapped to header_priority_n. We can then
	 * set the switch priority in pn_tx_pri_map.
	 */

	for (tc = 0; tc < num_tc; tc++) {
		prio = tc;

		/* For simplicity we assign the same priority (TCn) to
		 * all queues of a Traffic Class.
		 */
		for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
			tx_prio_map |= prio << (4 * i);

		count = qopt->count[tc];
		offset = qopt->offset[tc];
		netdev_set_tc_queue(ndev, tc, count, offset);
	}

	writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);

exit_put:
	pm_runtime_put(common->dev);

	return ret;
}
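/* Example of the TX_PRI_MAP encoding built above: for num_tc = 2 with
 * qopt->count = {2, 2} and qopt->offset = {0, 2}, queues 0-1 keep switch
 * priority 0 and queues 2-3 get priority 1, so
 * tx_prio_map = (1 << 8) | (1 << 12) = 0x1100.
 */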
static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
{
	int verify_time_ms = port->qos.iet.verify_time_ms;
	u32 val;

	/* The number of wireside clocks contained in the verify
	 * timeout counter. The default is 0x1312d0
	 * (10ms at 125Mhz in 1G mode).
	 */
	val = 125 * HZ_PER_MHZ;	/* assuming 125MHz wireside clock */

	val /= MILLIHZ_PER_HZ;	/* count per ms timeout */
	val *= verify_time_ms;	/* count for timeout ms */

	if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
		return -EINVAL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);

	return 0;
}
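/* Example: with the default verify_time_ms = 10, the value programmed above
 * is 125,000,000 / 1000 * 10 = 1,250,000 = 0x1312d0, i.e. the documented
 * reset default of the register.
 */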
static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
{
	u32 ctrl, status;
	int try;

	try = 20;
	do {
		/* Reset the verify state machine by writing 1
		 * to LINKFAIL
		 */
		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
		ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

		/* Clear MAC_LINKFAIL bit to start Verify. */
		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
		ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

		msleep(port->qos.iet.verify_time_ms);

		status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
		if (status & AM65_CPSW_PN_MAC_VERIFIED)
			return 0;

		if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
			netdev_dbg(port->ndev,
				   "MAC Merge verify failed, trying again\n");
			continue;
		}

		if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
			netdev_dbg(port->ndev, "MAC Merge respond error\n");
			return -ENODEV;
		}

		if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
			netdev_dbg(port->ndev, "MAC Merge verify error\n");
			return -ENODEV;
		}
	} while (try-- > 0);

	netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
	return -ETIMEDOUT;
}
static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
	val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
}
/* enable common IET_ENABLE only if at least 1 port has rx IET enabled.
 * UAPI doesn't allow tx enable without rx enable.
 */
void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	bool rx_enable = false;
	u32 val;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
		rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
		if (rx_enable)
			break;
	}

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (rx_enable)
		val |= AM65_CPSW_CTL_IET_EN;
	else
		val &= ~AM65_CPSW_CTL_IET_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->iet_enabled = rx_enable;
}
/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
 * (active/inactive), but the preemptible traffic classes should only be
 * committed to hardware once TX is active. Resort to polling.
 */
void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
{
	u8 preemptible_tcs;
	int err;
	u32 val;

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
		return;

	/* update common IET enable */
	am65_cpsw_iet_common_enable(port->common);

	/* update verify count */
	err = am65_cpsw_iet_set_verify_timeout_count(port);
	if (err) {
		netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
		return;
	}

	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
		err = am65_cpsw_iet_verify_wait(port);
		if (err)
			return;
	}

	preemptible_tcs = port->qos.iet.preemptible_tcs;
	am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
}
static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);

	port->qos.iet.preemptible_tcs = preemptible_tcs;
	mutex_lock(&priv->mm_lock);
	am65_cpsw_iet_commit_preemptible_tcs(port);
	mutex_unlock(&priv->mm_lock);
}
static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
{
	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	mutex_lock(&priv->mm_lock);
	am65_cpsw_iet_commit_preemptible_tcs(port);
	mutex_unlock(&priv->mm_lock);
}
static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
	return port->qos.est_oper || port->qos.est_admin;
}
static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
{
	u32 val;

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (enable)
		val |= AM65_CPSW_CTL_EST_EN;
	else
		val &= ~AM65_CPSW_CTL_EST_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->est_enabled = enable;
}
static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (enable)
		val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
	else
		val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;

	writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
}
/* target new EST RAM buffer, actual toggle happens after cycle completion */
static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
					      int buf_num)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	if (buf_num)
		val |= AM65_CPSW_PN_EST_BUFSEL;
	else
		val &= ~AM65_CPSW_PN_EST_BUFSEL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
}
/* am65_cpsw_port_est_is_swapped() - Indicate if h/w has transitioned
 * admin -> oper or not
 *
 * Return true if already transitioned, i.e. oper is equal to admin and the
 * buf numbers match (est_oper->buf matches est_admin->buf).
 * Return false if before the transition, i.e. oper is not equal to admin
 * (a previous admin command is waiting to be transitioned to oper state
 * and est_oper->buf does not match est_admin->buf).
 */
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
					 int *admin)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);

	return *admin == *oper;
}
/* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
 * Admin to program the new schedule.
 *
 * If oper is the same as admin, return the other buffer (!oper) as the admin
 * buffer. If oper is not the same, the driver lets the current oper continue
 * as it is in the process of transitioning from admin -> oper. So keep the
 * oper by selecting the same oper buffer by writing to EST_BUFSEL bit in
 * EST CTL register. In the second iteration they will match and code returns.
 * The actual buffer to write command is selected later before it is ready
 * to update the schedule.
 */
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
	int oper, admin;
	int roll = 2;

	while (roll--) {
		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
			return !oper;

		/* admin is not set, so hinder transition as it's not allowed
		 * to touch memory in-flight, by targeting same oper buf.
		 */
		am65_cpsw_port_est_assign_buf_num(ndev, oper);

		dev_info(&ndev->dev,
			 "Prev. EST admin cycle is in transit %d -> %d\n",
			 oper, admin);
	}

	return oper;
}
static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = port->qos.est_admin;
	port->qos.est_admin = NULL;
}
static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
					    struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	val &= ~AM65_CPSW_PN_EST_ONEBUF;
	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);

	est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);

	/* a rolled buf num means the buffer changed while configuring */
	if (port->qos.est_oper && port->qos.est_admin &&
	    est_new->buf == port->qos.est_oper->buf)
		am65_cpsw_admin_to_oper(ndev);
}
static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	int common_enable = 0;
	int i;

	am65_cpsw_port_est_enable(port, enable);

	for (i = 0; i < common->port_num; i++)
		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);

	common_enable |= enable;
	am65_cpsw_est_enable(common, common_enable);
}
/* This update is supposed to be used in any routine before getting the real
 * state of the admin -> oper transition, particularly in a generic routine
 * that reports the real state to the Taprio Qdisc.
 */
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int oper, admin;

	if (!port->qos.est_admin)
		return;

	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
		return;

	am65_cpsw_admin_to_oper(ndev);
}
/* The fetch command count is a number of bytes in Gigabit mode or nibbles in
 * 10/100Mb mode. So, given the link speed and a time in ns, convert the ns
 * to the number of bytes/nibbles that can be transmitted at that speed.
 */
static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
	u64 temp;

	temp = ns * link_speed;
	if (link_speed < SPEED_1000)
		temp <<= 1;

	return DIV_ROUND_UP(temp, 8 * 1000);
}
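/* Example: a 10,000 ns gate interval on a 1000 Mbit/s link gives
 * temp = 10000 * 1000 and DIV_ROUND_UP(10000000, 8000) = 1250 bytes;
 * at 100 Mbit/s the same interval is 125 bytes, which the left shift
 * reports as 250 nibbles.
 */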
static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
						  int fetch_cnt,
						  int fetch_allow)
{
	u32 prio_mask, cmd_fetch_cnt, cmd;

	do {
		if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
			fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
			cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
		} else {
			cmd_fetch_cnt = fetch_cnt;
			/* fetch count can't be less than 16? */
			if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
				cmd_fetch_cnt = 16;

			fetch_cnt = 0;
		}

		prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
		cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;

		writel(cmd, addr);
		addr += 4;
	} while (fetch_cnt);

	return addr;
}
static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
				      struct tc_taprio_qopt_offload *taprio,
				      int link_speed)
{
	int i, cmd_cnt, cmd_sum = 0;
	u32 fetch_cnt;

	for (i = 0; i < taprio->num_entries; i++) {
		if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
			dev_err(&ndev->dev, "Only SET command is supported");
			return -EINVAL;
		}

		fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
						   link_speed);

		cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
		if (!cmd_cnt)
			cmd_cnt++;

		cmd_sum += cmd_cnt;

		if (!fetch_cnt)
			break;
	}

	return cmd_sum;
}
static int am65_cpsw_est_check_scheds(struct net_device *ndev,
				      struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int cmd_num;

	cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
					     port->qos.link_speed);
	if (cmd_num < 0)
		return cmd_num;

	if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
		dev_err(&ndev->dev, "No fetch RAM");
		return -ENOMEM;
	}

	return 0;
}
static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
					 struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
	void __iomem *ram_addr, *max_ram_addr;
	struct tc_taprio_sched_entry *entry;
	int i, ram_size;

	ram_addr = port->fetch_ram_base;
	ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
	ram_addr += est_new->buf * ram_size;

	max_ram_addr = ram_size + ram_addr;
	for (i = 0; i < est_new->taprio.num_entries; i++) {
		entry = &est_new->taprio.entries[i];

		fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
						   port->qos.link_speed);
		fetch_allow = entry->gate_mask;
		if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
			dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
				fetch_allow);

		ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
							fetch_allow);

		if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
			dev_info(&ndev->dev,
				 "next scheds after %d have no impact", i + 1);
			break;
		}

		all_fetch_allow |= fetch_allow;
	}

	/* end cmd, enabling non-timed queues for potential over cycle time */
	if (ram_addr < max_ram_addr)
		writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
/*
 * Enable ESTf periodic output, set cycle start time and interval.
 */
static int am65_cpsw_timer_set(struct net_device *ndev,
			       struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpts_estf_cfg cfg;

	cfg.ns_period = est_new->taprio.cycle_time;
	cfg.ns_start = est_new->taprio.base_time;

	return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
}
static void am65_cpsw_timer_stop(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;

	am65_cpts_estf_disable(cpts, port->port_id - 1);
}
static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
					  struct am65_cpsw_est *est_new)
{
	struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;
	u64 cur_time;
	s64 diff;

	if (!port->qos.est_oper)
		return TACT_PROG;

	taprio_new = &est_new->taprio;
	taprio_oper = &port->qos.est_oper->taprio;

	if (taprio_new->cycle_time != taprio_oper->cycle_time)
		return TACT_NEED_STOP;

	/* in order to avoid a timer reset, get base_time from the oper taprio */
	if (!taprio_new->base_time && taprio_oper)
		taprio_new->base_time = taprio_oper->base_time;

	if (taprio_new->base_time == taprio_oper->base_time)
		return TACT_SKIP_PROG;

	/* base times are cycle synchronized */
	diff = taprio_new->base_time - taprio_oper->base_time;
	diff = diff < 0 ? -diff : diff;
	if (diff % taprio_new->cycle_time)
		return TACT_NEED_STOP;

	cur_time = am65_cpts_ns_gettime(cpts);
	if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
		return TACT_SKIP_PROG;

	/* TODO: Admin schedule at future time is not currently supported */
	return TACT_NEED_STOP;
}
static void am65_cpsw_stop_est(struct net_device *ndev)
{
	am65_cpsw_est_set(ndev, 0);
	am65_cpsw_timer_stop(ndev);
}
static void am65_cpsw_taprio_destroy(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	am65_cpsw_stop_est(ndev);

	devm_kfree(&ndev->dev, port->qos.est_admin);
	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = NULL;
	port->qos.est_admin = NULL;

	am65_cpsw_reset_tc_mqprio(ndev);
}
static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
				struct tc_taprio_qopt_offload *to)
{
	int i;

	*to = *from;
	for (i = 0; i < from->num_entries; i++)
		to->entries[i] = from->entries[i];
}
static int am65_cpsw_taprio_replace(struct net_device *ndev,
				    struct tc_taprio_qopt_offload *taprio)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct netlink_ext_ack *extack = taprio->mqprio.extack;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpsw_est *est_new;
	u64 cur_time, n;
	int ret, tact;

	if (!netif_running(ndev)) {
		NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
		return -ENETDOWN;
	}

	if (common->pf_p0_rx_ptype_rrobin) {
		NL_SET_ERR_MSG_MOD(extack,
				   "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
		return -EINVAL;
	}

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return -ENOLINK;

	if (taprio->cycle_time_extension) {
		NL_SET_ERR_MSG_MOD(extack,
				   "cycle time extension not supported");
		return -EOPNOTSUPP;
	}

	est_new = devm_kzalloc(&ndev->dev,
			       struct_size(est_new, taprio.entries, taprio->num_entries),
			       GFP_KERNEL);
	if (!est_new)
		return -ENOMEM;

	ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
	if (ret)
		return ret;

	am65_cpsw_cp_taprio(taprio, &est_new->taprio);

	am65_cpsw_est_update_state(ndev);

	ret = am65_cpsw_est_check_scheds(ndev, est_new);
	if (ret < 0)
		goto fail;

	tact = am65_cpsw_timer_act(ndev, est_new);
	if (tact == TACT_NEED_STOP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't toggle estf timer, stop taprio first");
		ret = -EINVAL;
		goto fail;
	}

	if (tact == TACT_PROG)
		am65_cpsw_timer_stop(ndev);

	am65_cpsw_port_est_get_buf_num(ndev, est_new);
	am65_cpsw_est_set_sched_list(ndev, est_new);
	am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);

	/* If the base-time is in the past, start the schedule from the time:
	 * base_time + (N * cycle_time)
	 * where N is the smallest possible integer such that the above
	 * time is in the future.
	 */
	cur_time = am65_cpts_ns_gettime(cpts);
	if (est_new->taprio.base_time < cur_time) {
		n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
		est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
	}

	am65_cpsw_est_set(ndev, 1);

	if (tact == TACT_PROG) {
		ret = am65_cpsw_timer_set(ndev, est_new);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set cycle time");
			goto fail;
		}
	}

	devm_kfree(&ndev->dev, port->qos.est_admin);
	port->qos.est_admin = est_new;
	am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);

	return 0;

fail:
	am65_cpsw_reset_tc_mqprio(ndev);
	devm_kfree(&ndev->dev, est_new);
	return ret;
}
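/* Example of the base-time adjustment in am65_cpsw_taprio_replace(): with
 * base_time = 1,000 ns, cycle_time = 2,000 ns and cur_time = 10,500 ns,
 * n = div64_u64(9500, 2000) = 4, so base_time becomes
 * 1000 + 5 * 2000 = 11,000 ns, the first cycle boundary in the future.
 */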
static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	ktime_t cur_time;
	s64 delta;

	if (!am65_cpsw_port_est_enabled(port))
		return;

	if (port->qos.link_down_time) {
		cur_time = ktime_get();
		delta = ktime_us_delta(cur_time, port->qos.link_down_time);
		if (delta > USEC_PER_SEC) {
			dev_err(&ndev->dev,
				"Link has been lost too long, stopping TAS");
			goto purge_est;
		}
	}

	return;

purge_est:
	am65_cpsw_taprio_destroy(ndev);
}
static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
	struct tc_taprio_qopt_offload *taprio = type_data;
	int err = 0;

	switch (taprio->cmd) {
	case TAPRIO_CMD_REPLACE:
		err = am65_cpsw_taprio_replace(ndev, taprio);
		break;
	case TAPRIO_CMD_DESTROY:
		am65_cpsw_taprio_destroy(ndev);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}
static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
{
	struct tc_query_caps_base *base = type_data;

	switch (base->type) {
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_caps *caps = base->caps;

		caps->validate_queue_counts = true;

		return 0;
	}

	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		caps->gate_mask_per_txq = true;

		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}
static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
					       struct netlink_ext_ack *extack,
					       struct flow_cls_offload *cls,
					       u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct am65_cpsw_qos *qos = &port->qos;
	struct flow_match_eth_addrs match;
	int ret;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_has_control_flags(rule, extack))
		return -EOPNOTSUPP;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_bc_ratelimit.cookie = cls->cookie;
		qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_mc_ratelimit.cookie = cls->cookie;
		qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
						    const struct flow_action_entry *act,
						    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
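/* For reference, a filter that satisfies the checks above (packet rate only,
 * exceed action drop, conform action pipe) and matches broadcast traffic
 * looks roughly like:
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress flower skip_sw dst_mac ff:ff:ff:ff:ff:ff \
 *      action police pkts_rate 1000 pkts_burst 32 conform-exceed drop/pipe
 * The interface name and the rate/burst numbers are only illustrative.
 */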
static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
					     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
								   act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}
static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
{
	struct am65_cpsw_qos *qos = &port->qos;

	if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
		qos->ale_bc_ratelimit.cookie = 0;
		qos->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
	}

	if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
		qos->ale_mc_ratelimit.cookie = 0;
		qos->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
	}

	return 0;
}
static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
					    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return am65_cpsw_qos_configure_clsflower(port, cls_flower);
	case FLOW_CLS_DESTROY:
		return am65_cpsw_qos_delete_clsflower(port, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct am65_cpsw_port *port = cb_priv;

	if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static LIST_HEAD(am65_cpsw_qos_block_cb_list);
static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
					  am65_cpsw_qos_setup_tc_block_cb,
					  port, port, true);
}
static void
am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
			       int tx_ch, u32 rate_mbps)
{
	struct am65_cpsw_host *host = am65_common_get_host(common);
	u32 ch_cir;
	int i;

	ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq);
	writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));

	/* update rates for every port tx queues */
	for (i = 0; i < common->port_num; i++) {
		struct net_device *ndev = common->ports[i].ndev;

		if (!ndev)
			continue;
		netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps;
	}
}
int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
					int queue, u32 rate_mbps)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpsw_tx_chn *tx_chn;
	u32 ch_rate, tx_ch_rate_msk_new;
	u32 ch_msk = 0;
	int ret;

	dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n",
		queue, rate_mbps, common->tx_ch_rate_msk);

	if (common->pf_p0_rx_ptype_rrobin) {
		dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n");
		return -EINVAL;
	}

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate_mbps)
		return 0;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}
	ret = 0;

	tx_ch_rate_msk_new = common->tx_ch_rate_msk;
	if (rate_mbps && !(tx_ch_rate_msk_new & BIT(queue))) {
		tx_ch_rate_msk_new |= BIT(queue);
		ch_msk = GENMASK(common->tx_ch_num - 1, queue);
		ch_msk = tx_ch_rate_msk_new ^ ch_msk;
	} else if (!rate_mbps) {
		tx_ch_rate_msk_new &= ~BIT(queue);
		ch_msk = queue ? GENMASK(queue - 1, 0) : 0;
		ch_msk = tx_ch_rate_msk_new & ch_msk;
	}

	if (ch_msk) {
		dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
			common->tx_ch_rate_msk, tx_ch_rate_msk_new);
		ret = -EINVAL;
		goto exit_put;
	}

	tx_chn = &common->tx_chns[queue];
	tx_chn->rate_mbps = rate_mbps;
	common->tx_ch_rate_msk = tx_ch_rate_msk_new;

	if (!common->usage_count)
		/* will be applied on next netif up */
		goto exit_put;

	am65_cpsw_qos_tx_p0_rate_apply(common, queue, rate_mbps);

exit_put:
	pm_runtime_put(common->dev);
	return ret;
}
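/* Example of the sequential check above: with tx_ch_num = 4 and no other
 * queue limited, enabling a rate on queue 1 gives tx_ch_rate_msk_new = 0x2
 * and ch_msk = GENMASK(3, 1) ^ 0x2 = 0xc, so the request is rejected;
 * queues 3 and 2 must be rate limited first (msk 0xc), after which queue 1
 * yields msk 0xe and ch_msk = 0.
 */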
void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host = am65_common_get_host(common);
	int tx_ch;

	for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch];
		u32 ch_cir;

		if (!tx_chn->rate_mbps)
			continue;

		ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps,
						    common->bus_freq);
		writel(ch_cir,
		       host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
	}
}
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			       void *type_data)
{
	switch (type) {
	case TC_QUERY_CAPS:
		return am65_cpsw_tc_query_caps(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return am65_cpsw_setup_taprio(ndev, type_data);
	case TC_SETUP_QDISC_MQPRIO:
		return am65_cpsw_setup_mqprio(ndev, type_data);
	case TC_SETUP_BLOCK:
		return am65_cpsw_qos_setup_tc_block(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	port->qos.link_speed = link_speed;
	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_link_state_update(ndev);

	am65_cpsw_est_link_up(ndev, link_speed);
	port->qos.link_down_time = 0;
}
void am65_cpsw_qos_link_down(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	port->qos.link_speed = SPEED_UNKNOWN;
	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_link_state_update(ndev);

	if (!port->qos.link_down_time)
		port->qos.link_down_time = ktime_get();
}