// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload SW effort
 * in servicing DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
 * SW links data buffers and triggers QMU to send data to / receive data
 * from the host in one go.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */
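
/*
 * For reference, a GPD is a 16-byte descriptor (see the compiletime_assert
 * on QMU_GPD_SIZE in mtu3_qmu_init() below). A minimal sketch of the layout
 * assumed by this file, based only on the fields accessed here (the
 * authoritative definition lives in the driver headers):
 *
 *	struct qmu_gpd {
 *		__le32 dw0_info;	// HWO/BDP/BPS/ZLP/IOC flags + ext addr bits
 *		__le32 next_gpd;	// lower 32 bits of the next GPD's DMA address
 *		__le32 buffer;		// lower 32 bits of the data buffer's DMA address
 *		__le32 dw3_info;	// data / buffer length (and ext addr bits for RX)
 *	} __packed;
 */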

#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)

#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)		\
({					\
	typeof(x) x_ = (x);		\
	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)		\
({					\
	typeof(x) x_ = (x);		\
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})
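
/*
 * Note on the _OG / _EL macro pairs above and below: controllers with
 * mtu->gen2cp set use the _EL variants, which provide wider 20-bit length
 * fields, while older controllers use the _OG variants with 16-bit length
 * fields; the extension-address bit positions shift accordingly.
 */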

#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)		\
({					\
	typeof(x) x_ = (x);		\
	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)		\
({					\
	typeof(x) x_ = (x);		\
	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))

static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}

static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}

static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}

static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}

static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
		gpd_ring_init(ring, gpd);
	}
}

int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (!gpd)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
			ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}

void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	/* write RESUME again if the queue did not become active */
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/* check if a ring is empty */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}
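
/*
 * Despite the name, a nonzero return from gpd_ring_empty() effectively
 * means there is no free GPD left: advancing the enqueue pointer would
 * make it collide with the dequeue pointer. Because one GPD is always
 * kept as the SW-owned placeholder that the next mtu3_insert_gpd() links
 * to, at most MAX_GPD_NUM - 1 requests can be queued on a ring at a time.
 */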

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}
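
/*
 * Common pattern of the two prepare functions below: fill the current
 * enqueue GPD with the request's buffer address and length, advance the
 * enqueue pointer to the next (reserved) GPD, clear that GPD's HWO bit so
 * the hardware will stop there, link the current GPD to it via next_gpd
 * and the extension-address bits, and finally hand the current GPD over
 * to the hardware by setting IOC | HWO.
 */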

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw0_info = cpu_to_le32(ext_addr);

	if (req->zero) {
		if (mtu->gen2cp)
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
		else
			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
	}

	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW owns it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw3_info = cpu_to_le32(ext_addr);
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}
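
/*
 * A rough sketch of how the gadget layer is expected to drive the API
 * above when queuing a request (the exact call sites live in the ep_queue
 * path of the gadget code, not in this file):
 *
 *	if (mtu3_prepare_transfer(mep))		// nonzero: no free GPD
 *		return -EAGAIN;
 *	mtu3_insert_gpd(mep, mreq);		// fill and chain a GPD
 *	mtu3_qmu_resume(mep);			// kick the queue
 */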

int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
			QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to the next GPD when a ZLP is received */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
			QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in an atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
			!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop the queue, then give all GPDs back to SW */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * QMU can't transfer a zero length packet directly (a hardware limitation
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger a
 * length error interrupt and send the ZLP by BMU in the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length != 0)
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
	trace_mtu3_zlp_exp_gpd(mep, gpd_current);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}

/*
 * NOTE: the request list may already be empty, as in the following case:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet(meanwhile, the second one is transferred,
 * the tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid the above case, call qmu_done_tx() directly in the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* translate the phy address from the QMU register to a virtual address */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);
	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}