/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * The Queue Management Unit (QMU) is designed to offload SW from serving
 * DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
 * SW links data buffers and triggers QMU to send data to / receive data from
 * the host without per-packet intervention.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

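/*
 * Rough picture of a GPD ring as used below (illustrative, not to scale):
 * a ring is one contiguous block of MAX_GPD_NUM descriptors, and each
 * queued GPD points at the next one via its next_gpd DMA address.
 *
 *   ring->start                                        ring->end
 *   +-------+     +-------+     +-------+              +---------+
 *   | GPD 0 | --> | GPD 1 | --> | GPD 2 | --> ... -->  | GPD N-1 |
 *   +-------+     +-------+     +-------+              +---------+
 *       ^                                                   |
 *       +------------------ wraps around --------------------+
 *
 * SW owns a GPD until it sets GPD_FLAGS_HWO; once the transfer finishes,
 * HW clears HWO and (if IOC was set) raises a done interrupt, after which
 * SW may complete the request and reuse the descriptor.
 */
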
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_IOC	BIT(7)

#define GPD_EXT_FLAG_ZLP	BIT(5)

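/*
 * Flag names as used throughout this file: HWO - hardware owns the GPD,
 * BDP - buffer descriptor present, BPS - bypass this GPD, IOC - interrupt
 * on completion; GPD_EXT_FLAG_ZLP asks HW to append a zero length packet
 * after the data of this GPD (the BDP/BPS expansions are the conventional
 * QMU meanings and are not spelled out elsewhere in this driver).
 *
 * The queue completion registers (USB_QMU_TQCPR/USB_QMU_RQCPR) report the
 * current GPD as a DMA address; the two helpers below convert between that
 * address and the corresponding CPU pointer within a ring.
 */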
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->flag &= ~GPD_FLAGS_HWO;
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all gpds by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool, ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}

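/*
 * Each GPD carries an 8-bit checksum over its first QMU_CHECKSUM_LEN bytes
 * which HW verifies before using the descriptor (checksum failures show up
 * via QMU_TX_CS_ERR/QMU_RX_CS_ERR below), so the checksum has to be
 * recomputed every time a GPD is modified, right before HWO is handed over.
 */
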
/*
 * calculate the checksum of a gpd or bd
 * add "noinline" and "mb" to prevent wrong calculation
 */
static noinline u8 qmu_calc_checksum(u8 *data)
{
	u8 chksum = 0;
	int i;

	data[1] = 0x0;	/* set checksum to 0 */

	mb();	/* ensure the gpd/bd is really up-to-date */
	for (i = 0; i < QMU_CHECKSUM_LEN; i++)
		chksum += data[i];

	/* Default: HWO=1, @flag[bit0] */
	chksum += 1;

	return 0xFF - chksum;
}

void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

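/*
 * One GPD slot is deliberately kept unused between enqueue and dequeue, so
 * that enqueue == dequeue always means an idle ring; gpd_ring_empty() below
 * therefore reports "no free GPD left for a new request" (the slot after
 * enqueue already being the dequeue GPD) rather than a literally empty ring.
 */
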
/* check if a ring is empty */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32((u32)req->dma);
	gpd->buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
		mep->epnum, gpd, enq);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));

	if (req->zero)
		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32((u32)req->dma);
	gpd->data_buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
		mep->epnum, gpd, enq);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}

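/*
 * mtu3_qmu_start() points the TX or RX queue at the ring's DMA base,
 * enables DMA requests plus checksum/length error interrupts for the
 * endpoint (and the per-GPD ZLP policy for TX, receive-ZLP handling for
 * RX), then kicks the queue with QMU_Q_START unless it is already active.
 */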
int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
		mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
			QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN);
		mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to next GPD when receiving a ZLP */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
			QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop the queue, then hand all gpds back to software */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero length packet directly (a hardware limitation
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
 * a length error interrupt and let the ISR send the ZLP via the BMU instead.
 */
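/*
 * Rough sequence of the workaround implemented below: take the endpoint
 * out of QMU mode by clearing TX_DMAREQEN, poll until the TX FIFO has
 * room, set TX_TXPKTRDY so the BMU sends the ZLP itself, mark the current
 * GPD with GPD_FLAGS_BPS so QMU will skip it, then re-enable DMA requests
 * and resume the queue.
 */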
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length == 0)
		req = &mreq->request;
	else
		return;

	gpd_current = gpd_dma_to_virt(ring, gpd_dma);

	if (le16_to_cpu(gpd_current->buf_len) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

	/* bypass the current GPD */
	gpd_current->flag |= GPD_FLAGS_BPS;
	gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
	gpd_current->flag |= GPD_FLAGS_HWO;

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)-->
 * queue_tx --> process_tasklet(meanwhile, the second one is transferred,
 * tasklet processes both of them)--> qmu_interrupt for the second one.
 * To avoid the case above, qmu_done_tx() is called directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;

	/* translate the phy address from the QMU register to a virtual one */
	gpd_current = gpd_dma_to_virt(ring, gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;

	gpd_current = gpd_dma_to_virt(ring, gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

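/*
 * EP0 is handled separately and never runs through QMU, which is why the
 * per-endpoint loops below start from index 1.
 */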
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read-update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

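/*
 * The GPD pool hands out one whole ring per allocation: QMU_GPD_RING_SIZE
 * is expected to be MAX_GPD_NUM * QMU_GPD_SIZE, aligned on the 16-byte GPD
 * size, so mtu3_gpd_ring_alloc() gets a physically contiguous block of
 * descriptors per endpoint.
 */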
int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}