// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload from SW the work
 * of serving DMA interrupts.
 * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
 * SW links the data buffers and then triggers QMU to send / receive data
 * to / from the host without further SW intervention.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */
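
/*
 * Overview of the data structures used below (derived from this file, not
 * from the programming guide): each endpoint owns one GPD ring
 * (struct mtu3_gpd_ring), a physically contiguous array of MAX_GPD_NUM
 * descriptors allocated from a dma_pool and chained through the next_gpd
 * field; one slot is always kept reserved so that enqueue never catches up
 * with dequeue.
 */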
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_IOC	BIT(7)

#define GPD_EXT_FLAG_ZLP	BIT(5)
#define GPD_EXT_NGP(x)		(((x) & 0xf) << 4)
#define GPD_EXT_BUF(x)		(((x) & 0xf) << 0)

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
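
/*
 * A note on addresses above 4GB, as used by the helpers below: the low
 * 32 bits of a GPD or buffer address are programmed directly, while the
 * upper bits are packed into the 4-bit GPD_EXT_NGP()/GPD_EXT_BUF() fields
 * of the GPD or the QMU_START_ADDR_HI part of the TQHIAR/RQHIAR registers;
 * HILO_DMA() reassembles the full address when reading the current GPD
 * pointer back from the hardware.
 */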
static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}
static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}
static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}
static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}
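
/*
 * The whole GPD ring lives in one dma_pool block, so converting between
 * the DMA address seen by QMU and the CPU virtual address is plain pointer
 * arithmetic against ring->dma / ring->start, bounded by MAX_GPD_NUM.
 */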
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}
static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}
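
/*
 * (Re)initialize the ring bookkeeping: enqueue and dequeue both start at
 * the first GPD, and 'end' points at the last descriptor of the
 * MAX_GPD_NUM-entry array.
 */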
static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}
static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->flag &= ~GPD_FLAGS_HWO;
		gpd_ring_init(ring, gpd);
	}
}
int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all gpds by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}
void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool, ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}
/*
 * calculate the checksum of a gpd or bd
 * add "noinline" and "mb" to prevent wrong calculation
 */
static noinline u8 qmu_calc_checksum(u8 *data)
{
	u8 chksum = 0;
	int i;

	data[1] = 0x0;	/* set checksum field to 0 before summing */

	mb();	/* ensure the gpd/bd is really up-to-date */
	for (i = 0; i < QMU_CHECKSUM_LEN; i++)
		chksum += data[i];

	/* Default: HWO=1, @flag[bit0], set just after this call */
	chksum += 1;

	return 0xFF - chksum;
}
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}
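
/*
 * The enqueue/dequeue cursors only move forward and wrap from 'end' back
 * to 'start'.  No locking is done at this level; callers are assumed to
 * serialize access to the ring (e.g. under the controller's spinlock).
 */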
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}
/* check if a ring has a free slot left */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}
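
/*
 * A nonzero return means the GPD following 'enqueue' would collide with
 * 'dequeue': only the reserved slot is left, so no new request may be
 * queued yet.  A rough sketch of how the gadget side is expected to use
 * this (the call sequence below is an assumption, not code from this file):
 *
 *	if (mtu3_prepare_transfer(mep))
 *		return -EAGAIN;		(no free GPD in the ring)
 *	mtu3_insert_gpd(mep, mreq);
 *	mtu3_qmu_resume(mep);
 */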
int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}
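
/*
 * Fill the GPD at 'enqueue' for a TX (IN) request: program the buffer
 * address and length, link it to the next (still SW-owned) GPD, stamp the
 * checksum over the first QMU_CHECKSUM_LEN bytes, and finally hand the
 * descriptor over to HW by setting HWO.
 */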
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	dma_addr_t enq_dma;
	u16 ext_addr;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
	gpd->buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
	gpd->tx_ext_addr = cpu_to_le16(ext_addr);

	if (req->zero)
		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}
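
/*
 * RX (OUT) preparation mirrors the TX path, except that the allocated
 * buffer size goes into data_buf_len; the actual number of received bytes
 * is written back by HW into buf_len and picked up in qmu_done_rx().
 */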
static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	dma_addr_t enq_dma;
	u16 ext_addr;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
	gpd->data_buf_len = cpu_to_le16(req->length);
	gpd->flag |= GPD_FLAGS_IOC;

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
	gpd->rx_ext_addr = cpu_to_le16(ext_addr);
	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
	gpd->flag |= GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	return 0;
}
void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}
int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
			    QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to next GPD when receive ZLP */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
			    QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}
/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
					!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}
void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop the queue, then hand all GPDs back to software */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}
/*
 * QMU can't transfer a zero-length packet directly (a hardware limitation
 * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger a
 * length error interrupt and then send the ZLP by the BMU from the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length == 0)
		req = &mreq->request;
	else
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (le16_to_cpu(gpd_current->buf_len) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
					txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);

	/* bypass the current GPD */
	gpd_current->flag |= GPD_FLAGS_BPS;
	gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
	gpd_current->flag |= GPD_FLAGS_HWO;

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}
/*
 * NOTE: the request list may already be empty in a case like the following:
 * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet(meanwhile, the second request is transferred,
 * and the tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid this case, qmu_done_tx() is called directly from the ISR instead.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* translate the phy address read from the QMU register */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
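
/*
 * The done status packs TX completions in the low 16 bits and RX
 * completions in the high 16 bits (see the "tx=%x, rx=%x" debug print in
 * mtu3_qmu_isr() below); the loop starts at 1, i.e. EP0 is handled outside
 * the QMU path.
 */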
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}
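
/*
 * Top-level QMU interrupt handler: both status words are masked with the
 * corresponding enable registers before use, the done status is acked
 * (write-1-to-clear), and done / exception events are then dispatched to
 * the helpers above.
 */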
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read-update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}
int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
					    QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}
void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}