/*
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (c) 2008, MontaVista Software, Inc. <source@mvista.com>
 *
 * This file implements a DMA interface using TI's CPPI 4.1 DMA.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */
#include <linux/errno.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "cppi41_dma.h"
#define USB_CPPI41_DESC_SIZE_SHIFT 6
#define USB_CPPI41_DESC_ALIGN	(1 << USB_CPPI41_DESC_SIZE_SHIFT)
#define USB_CPPI41_CH_NUM_PD	64	/* 4K bulk data at full speed */
#define USB_CPPI41_MAX_PD	(USB_CPPI41_CH_NUM_PD * USB_CPPI41_NUM_CH)

#ifdef DEBUG_CPPI_TD
#define dprintk(x, ...)		printk(x, ## __VA_ARGS__)
#else
#define dprintk(x, ...)
#endif
/*
 * Data structure definitions
 */

/*
 * USB Packet Descriptor
 */
struct usb_pkt_desc {
	/* Hardware descriptor fields from this point */
	struct cppi41_host_pkt_desc hw_desc;
	/* Protocol specific data */
	struct usb_pkt_desc *next_pd_ptr;
	u8 ch_num;			/* DMA channel this PD was queued on */
	u8 ep_num;			/* USB endpoint this PD belongs to */
	u8 eop;				/* Set on the PD that ends a request */
	dma_addr_t dma_addr;		/* Physical address of this PD */
};
/**
 * struct cppi41_channel - DMA Channel Control Structure
 *
 * Using the same for Tx/Rx.
 */
struct cppi41_channel {
	struct dma_channel channel;

	struct cppi41_dma_ch_obj dma_ch_obj;	/* DMA channel object */
	struct cppi41_queue src_queue;		/* Tx queue or Rx free
						   descriptor/buffer queue */
	struct cppi41_queue_obj queue_obj;	/* Tx queue object or Rx free
						   descriptor/buffer queue object */

	u32 tag_info;				/* Tx PD Tag Information field */

	/* Which direction of which endpoint? */
	struct musb_hw_ep *end_pt;
	u8 transmit;
	u8 ch_num;				/* Channel number of Tx/Rx 0..3 */

	/* DMA mode: "transparent", RNDIS, CDC, or Generic RNDIS */
	u8 dma_mode;
	u8 autoreq;

	/* Book keeping for the current transfer request */
	dma_addr_t start_addr;
	u32 length;
	u32 curr_offset;
	u32 pkt_size;
	u8 transfer_mode;
	u8 zlp_queued;
};
/**
 * struct cppi41 - CPPI 4.1 DMA Controller Object
 *
 * Encapsulates all book keeping and data structures pertaining to
 * the CPPI 4.1 DMA controller.
 */
struct cppi41 {
	struct dma_controller controller;
	struct musb *musb;

	struct cppi41_channel tx_cppi_ch[USB_CPPI41_NUM_CH];
	struct cppi41_channel rx_cppi_ch[USB_CPPI41_NUM_CH];

	struct usb_pkt_desc *pd_pool_head;	/* Free PD pool head */
	dma_addr_t pd_mem_phys;			/* PD memory physical address */
	void *pd_mem;				/* PD memory pointer */
	u8 pd_mem_rgn;				/* PD memory region number */

	u16 teardownQNum;			/* Teardown completion queue number */
	struct cppi41_queue_obj queue_obj;	/* Teardown completion queue object */

	u32 pkt_info;				/* Tx PD Packet Information field */
};
static void print_pd_list(struct usb_pkt_desc *pd_pool_head)
{
	struct usb_pkt_desc *curr_pd = pd_pool_head;
	int cnt = 0;

	while (curr_pd != NULL) {
		/* Start a new row of eight PD pointers */
		if (cnt % 8 == 0)
			dprintk("\n%02x ", cnt);
		cnt++;
		dprintk(" %p", curr_pd);
		curr_pd = curr_pd->next_pd_ptr;
	}
	dprintk("\n");
}
static struct usb_pkt_desc *usb_get_free_pd(struct cppi41 *cppi)
{
	struct usb_pkt_desc *free_pd = cppi->pd_pool_head;

	if (free_pd != NULL) {
		cppi->pd_pool_head = free_pd->next_pd_ptr;
		free_pd->next_pd_ptr = NULL;
	}
	return free_pd;
}
static void usb_put_free_pd(struct cppi41 *cppi, struct usb_pkt_desc *free_pd)
{
	free_pd->next_pd_ptr = cppi->pd_pool_head;
	cppi->pd_pool_head = free_pd;
}
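/*
 * Note on the two helpers above: the free PD pool is a simple LIFO
 * singly-linked list threaded through next_pd_ptr, with pd_pool_head as
 * the anchor.  There is no locking here by design; callers are assumed
 * to run with the controller IRQ-locked, which is what the completion
 * and teardown paths below refer to as their "critical section".
 */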
/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels.
 */
static int __init cppi41_controller_start(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	void __iomem *reg_base;
	struct usb_pkt_desc *curr_pd;
	unsigned long pd_addr;
	int i;

	cppi = container_of(controller, struct cppi41, controller);

	/*
	 * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
	 * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
	 * Similarly, the descriptor size should also be a multiple of 32.
	 */

	/*
	 * Allocate free packet descriptor pool for all Tx/Rx endpoints --
	 * dma_alloc_coherent() will return a page aligned address, so our
	 * alignment requirement will be honored.
	 */
	cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
					  USB_CPPI41_MAX_PD *
					  USB_CPPI41_DESC_ALIGN,
					  &cppi->pd_mem_phys,
					  GFP_KERNEL | GFP_DMA);
	if (cppi->pd_mem == NULL) {
		DBG(1, "ERROR: packet descriptor memory allocation failed\n");
		return 0;
	}

	if (cppi41_mem_rgn_alloc(usb_cppi41_info.q_mgr, cppi->pd_mem_phys,
				 USB_CPPI41_DESC_SIZE_SHIFT,
				 get_count_order(USB_CPPI41_MAX_PD),
				 &cppi->pd_mem_rgn)) {
		DBG(1, "ERROR: queue manager memory region allocation "
		    "failed\n");
		goto free_pds;
	}

	/* Allocate the teardown completion queue */
	if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
			       0, &cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue allocation failed\n");
		goto free_mem_rgn;
	}
	DBG(4, "Allocated teardown completion queue %d in queue manager 0\n",
	    cppi->teardownQNum);

	if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue initialization "
		    "failed\n");
		goto free_queue;
	}

	/*
	 * "Slice" PDs one-by-one from the big chunk and
	 * add them to the free pool.
	 */
	curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
	pd_addr = cppi->pd_mem_phys;
	for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
		curr_pd->dma_addr = pd_addr;

		usb_put_free_pd(cppi, curr_pd);
		curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
						  USB_CPPI41_DESC_ALIGN);
		pd_addr += USB_CPPI41_DESC_ALIGN;
	}

	/* Configure the Tx channels */
	for (i = 0, cppi_ch = cppi->tx_cppi_ch;
	     i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
		const struct cppi41_tx_ch *tx_info;

		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->transmit = 1;
		cppi_ch->channel.private_data = cppi;

		/*
		 * Extract the CPPI 4.1 DMA Tx channel configuration and
		 * construct/store the Tx PD tag info field for later use...
		 */
		tx_info = cppi41_dma_block[usb_cppi41_info.dma_block].tx_ch_info
			  + usb_cppi41_info.ep_dma_ch[i];
		cppi_ch->src_queue = tx_info->tx_queue[0];
		cppi_ch->tag_info = (tx_info->port_num <<
				     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
				    (tx_info->ch_num <<
				     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
				    (tx_info->sub_ch_num <<
				     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
	}

	/* Configure the Rx channels */
	for (i = 0, cppi_ch = cppi->rx_cppi_ch;
	     i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->channel.private_data = cppi;
	}

	/* Construct/store Tx PD packet info field for later use */
	cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
			 (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT) |
			 (usb_cppi41_info.q_mgr << CPPI41_RETURN_QMGR_SHIFT) |
			 (usb_cppi41_info.tx_comp_q[0] <<
			  CPPI41_RETURN_QNUM_SHIFT);

	/* Do necessary configuration in hardware to get started */
	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, USB_AUTOREQ_REG, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;

 free_queue:
	if (cppi41_queue_free(0, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

 free_mem_rgn:
	if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

 free_pds:
	dma_free_coherent(cppi->musb->controller,
			  USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
			  cppi->pd_mem, cppi->pd_mem_phys);

	return 0;
}
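/*
 * Sizing sketch for the pool set up above (derived from the #defines at
 * the top of this file): USB_CPPI41_MAX_PD is 64 * USB_CPPI41_NUM_CH
 * descriptors of USB_CPPI41_DESC_ALIGN (64) bytes each, so the
 * dma_alloc_coherent() region works out to 4 KB of PD memory per DMA
 * channel -- the "4K bulk data at full speed" case the define mentions.
 */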
/**
 * cppi41_controller_stop - stop DMA controller
 * @controller: the controller
 *
 * De-initialize the DMA Controller as necessary.
 */
static int cppi41_controller_stop(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	void __iomem *reg_base;

	cppi = container_of(controller, struct cppi41, controller);

	/* Pop all the teardown descriptors queued to tdQueue */
	cppi41_free_teardown_queue(0);

	/* Free the teardown completion queue */
	if (cppi41_queue_free(usb_cppi41_info.q_mgr, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

	/*
	 * Free the packet descriptor region allocated
	 * for all Tx/Rx channels.
	 */
	if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

	dma_free_coherent(cppi->musb->controller,
			  USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
			  cppi->pd_mem, cppi->pd_mem_phys);

	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, USB_AUTOREQ_REG, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;
}
/**
 * cppi41_channel_alloc - allocate a CPPI channel for DMA.
 * @controller: the controller
 * @ep: the endpoint
 * @is_tx: 1 for Tx channel, 0 for Rx channel
 *
 * With CPPI, channels are bound to each transfer direction of a non-control
 * endpoint, so allocating (and deallocating) is mostly a way to notice bad
 * housekeeping on the software side. We assume the IRQs are always active.
 */
static struct dma_channel *cppi41_channel_alloc(struct dma_controller
						*controller,
						struct musb_hw_ep *ep, u8 is_tx)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	u32 ch_num, ep_num = ep->epnum;

	cppi = container_of(controller, struct cppi41, controller);

	/* Remember, ep_num: 1 .. Max_EP, and CPPI ch_num: 0 .. Max_EP - 1 */
	ch_num = ep_num - 1;

	if (ep_num > USB_CPPI41_NUM_CH) {
		DBG(1, "No %cx DMA channel for EP%d\n",
		    is_tx ? 'T' : 'R', ep_num);
		return NULL;
	}

	cppi_ch = (is_tx ? cppi->tx_cppi_ch : cppi->rx_cppi_ch) + ch_num;

	/* As of now, just return the corresponding CPPI 4.1 channel handle */
	if (is_tx) {
		/* Initialize the CPPI 4.1 Tx DMA channel */
		if (cppi41_tx_ch_init(&cppi_ch->dma_ch_obj,
				      usb_cppi41_info.dma_block,
				      usb_cppi41_info.ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_tx_ch_init failed for "
			    "channel %d\n", ch_num);
			return NULL;
		}
		/*
		 * Teardown descriptors will be pushed to the dedicated
		 * teardown completion queue.
		 */
		cppi41_dma_ch_default_queue(&cppi_ch->dma_ch_obj,
					    0, cppi->teardownQNum);
	} else {
		struct cppi41_rx_ch_cfg rx_cfg;
		u8 q_mgr = usb_cppi41_info.q_mgr;
		int i;

		/* Initialize the CPPI 4.1 Rx DMA channel */
		if (cppi41_rx_ch_init(&cppi_ch->dma_ch_obj,
				      usb_cppi41_info.dma_block,
				      usb_cppi41_info.ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_rx_ch_init failed\n");
			return NULL;
		}

		if (cppi41_queue_alloc(CPPI41_FREE_DESC_BUF_QUEUE |
				       CPPI41_UNASSIGNED_QUEUE,
				       q_mgr, &cppi_ch->src_queue.q_num)) {
			DBG(1, "ERROR: cppi41_queue_alloc failed for "
			    "free descriptor/buffer queue\n");
			return NULL;
		}
		DBG(4, "Allocated free descriptor/buffer queue %d in "
		    "queue manager %d\n", cppi_ch->src_queue.q_num, q_mgr);

		rx_cfg.default_desc_type = cppi41_rx_host_desc;
		rx_cfg.sop_offset = 0;
		rx_cfg.retry_starved = 1;
		rx_cfg.rx_queue.q_mgr = cppi_ch->src_queue.q_mgr = q_mgr;
		rx_cfg.rx_queue.q_num = usb_cppi41_info.rx_comp_q[0];
		for (i = 0; i < 4; i++)
			rx_cfg.cfg.host_pkt.fdb_queue[i] = cppi_ch->src_queue;
		cppi41_rx_ch_configure(&cppi_ch->dma_ch_obj, &rx_cfg);
	}

	/* Initialize the CPPI 4.1 DMA source queue */
	if (cppi41_queue_init(&cppi_ch->queue_obj, cppi_ch->src_queue.q_mgr,
			      cppi_ch->src_queue.q_num)) {
		DBG(1, "ERROR: cppi41_queue_init failed for %s queue",
		    is_tx ? "Tx" : "Rx free descriptor/buffer");
		if (!is_tx &&
		    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
				      cppi_ch->src_queue.q_num))
			DBG(1, "ERROR: failed to free Rx descriptor/buffer "
			    "queue\n");
		return NULL;
	}

	/* Enable the DMA channel */
	cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

	if (cppi_ch->end_pt)
		DBG(1, "Re-allocating DMA %cx channel %d (%p)\n",
		    is_tx ? 'T' : 'R', ch_num, cppi_ch);

	cppi_ch->end_pt = ep;
	cppi_ch->ch_num = ch_num;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocated DMA %cx channel %d for EP%d\n", is_tx ? 'T' : 'R',
	    ch_num, ep_num);

	return &cppi_ch->channel;
}
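/*
 * Usage sketch (how the musb core is expected to drive the hooks wired
 * up in dma_controller_create() below): channel_alloc() is called once
 * per endpoint direction, with ep_num 1..USB_CPPI41_NUM_CH mapping onto
 * CPPI channels 0..USB_CPPI41_NUM_CH-1; each transfer is then queued
 * with channel_program(), and the channel is handed back through
 * channel_release() when the endpoint is torn down.
 */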
/**
 * cppi41_channel_release - release a CPPI DMA channel
 * @channel: the channel
 */
static void cppi41_channel_release(struct dma_channel *channel)
{
	struct cppi41_channel *cppi_ch;

	/* REVISIT: for paranoia, check state and abort if needed... */
	cppi_ch = container_of(channel, struct cppi41_channel, channel);
	if (cppi_ch->end_pt == NULL)
		DBG(1, "Releasing idle DMA channel %p\n", cppi_ch);

	/* But for now, not its IRQ */
	cppi_ch->end_pt = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	cppi41_dma_ch_disable(&cppi_ch->dma_ch_obj);

	/* De-allocate Rx free descriptor/buffer queue */
	if (cppi_ch->transmit == 0 &&
	    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
			      cppi_ch->src_queue.q_num))
		DBG(1, "ERROR: failed to free Rx descriptor/buffer queue\n");
}
static void cppi41_mode_update(struct cppi41_channel *cppi_ch, u8 mode)
{
	if (mode != cppi_ch->dma_mode) {
		struct cppi41 *cppi = cppi_ch->channel.private_data;
		void __iomem *reg_base = cppi->musb->ctrl_base;
		u32 reg_val;
		u8 ep_num = cppi_ch->ch_num + 1;

		if (cppi_ch->transmit) {
			reg_val = musb_readl(reg_base, USB_TX_MODE_REG);
			reg_val &= ~USB_TX_MODE_MASK(ep_num);
			reg_val |= mode << USB_TX_MODE_SHIFT(ep_num);
			musb_writel(reg_base, USB_TX_MODE_REG, reg_val);
		} else {
			reg_val = musb_readl(reg_base, USB_RX_MODE_REG);
			reg_val &= ~USB_RX_MODE_MASK(ep_num);
			reg_val |= mode << USB_RX_MODE_SHIFT(ep_num);
			musb_writel(reg_base, USB_RX_MODE_REG, reg_val);
		}
		cppi_ch->dma_mode = mode;
	}
}
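/*
 * The TX/RX mode registers hold one small mode field per endpoint, so
 * cppi41_mode_update() does a read-modify-write: clear
 * USB_{TX,RX}_MODE_MASK(ep_num), OR in the new mode shifted by
 * USB_{TX,RX}_MODE_SHIFT(ep_num), and write the register back, leaving
 * the other endpoints' mode bits untouched.
 */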
/*
 * Tx is a lot more reasonable than Rx: RNDIS mode seems to behave well except
 * how it handles the exactly-N-packets case. It appears that there's a hiccup
 * in that case (maybe the DMA completes before a ZLP gets written?) boiling
 * down to not being able to rely on the XFER DMA writing any terminating zero
 * length packet before the next transfer is started...
 *
 * The generic RNDIS mode does not have this misfeature, so we prefer using it
 * instead. We then send the terminating ZLP *explicitly* using DMA instead of
 * doing it by PIO after an IRQ.
 */
/**
 * cppi41_next_tx_segment - DMA write for the next chunk of a buffer
 * @tx_ch: Tx channel
 *
 * Context: controller IRQ-locked
 */
static unsigned cppi41_next_tx_segment(struct cppi41_channel *tx_ch)
{
	struct cppi41 *cppi = tx_ch->channel.private_data;
	struct usb_pkt_desc *curr_pd;
	u32 length = tx_ch->length - tx_ch->curr_offset;
	u32 pkt_size = tx_ch->pkt_size;
	unsigned num_pds, n;

	/*
	 * Tx can use the generic RNDIS mode where we can probably fit this
	 * transfer in one PD and one IRQ. The only time we would NOT want
	 * to use it is when the hardware constraints prevent it...
	 */
	if ((pkt_size & 0x3f) == 0 && length > pkt_size) {
		num_pds = 1;
		pkt_size = length;
		cppi41_mode_update(tx_ch, USB_GENERIC_RNDIS_MODE);
	} else {
		num_pds = (length + pkt_size - 1) / pkt_size;
		cppi41_mode_update(tx_ch, USB_TRANSPARENT_MODE);
	}

	/*
	 * If the length of the transmit buffer is 0 or a multiple of the
	 * endpoint size, then send the zero length packet.
	 */
	if (!length || (tx_ch->transfer_mode && length % pkt_size == 0))
		num_pds++;

	DBG(4, "TX DMA%u, %s, maxpkt %u, %u PDs, addr %#x, len %u\n",
	    tx_ch->ch_num, tx_ch->dma_mode ? "accelerated" : "transparent",
	    pkt_size, num_pds, tx_ch->start_addr + tx_ch->curr_offset, length);

	for (n = 0; n < num_pds; n++) {
		struct cppi41_host_pkt_desc *hw_desc;

		/* Get Tx host packet descriptor from the free pool */
		curr_pd = usb_get_free_pd(cppi);
		if (curr_pd == NULL) {
			DBG(1, "No Tx PDs\n");
			break;
		}

		if (length < pkt_size)
			pkt_size = length;

		hw_desc = &curr_pd->hw_desc;
		hw_desc->desc_info = (CPPI41_DESC_TYPE_HOST <<
				      CPPI41_DESC_TYPE_SHIFT) | pkt_size;
		hw_desc->tag_info = tx_ch->tag_info;
		hw_desc->pkt_info = cppi->pkt_info;

		hw_desc->buf_ptr = tx_ch->start_addr + tx_ch->curr_offset;
		hw_desc->buf_len = pkt_size;
		hw_desc->next_desc_ptr = 0;

		curr_pd->ch_num = tx_ch->ch_num;
		curr_pd->ep_num = tx_ch->end_pt->epnum;

		tx_ch->curr_offset += pkt_size;
		length -= pkt_size;

		if (pkt_size == 0)
			tx_ch->zlp_queued = 1;

		DBG(5, "TX PD %p: buf %08x, len %08x, pkt info %08x\n", curr_pd,
		    hw_desc->buf_ptr, hw_desc->buf_len, hw_desc->pkt_info);

		cppi41_queue_push(&tx_ch->queue_obj, curr_pd->dma_addr,
				  USB_CPPI41_DESC_ALIGN, pkt_size);
	}

	return n;
}
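/*
 * Worked example for the Tx segmentation above (numbers illustrative):
 * a 3072-byte bulk request with 512-byte packets satisfies the generic
 * RNDIS test (512 is a multiple of 64 and 3072 > 512), so it goes out
 * as a single PD and one completion; in transparent mode the same
 * request would need num_pds = (3072 + 511) / 512 = 6 PDs, one per
 * packet.
 */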
static void cppi41_autoreq_update(struct cppi41_channel *rx_ch, u8 autoreq)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;

	if (is_host_active(cppi->musb) &&
	    autoreq != rx_ch->autoreq) {
		void __iomem *reg_base = cppi->musb->ctrl_base;
		u32 reg_val = musb_readl(reg_base, USB_AUTOREQ_REG);
		u8 ep_num = rx_ch->ch_num + 1;

		reg_val &= ~USB_RX_AUTOREQ_MASK(ep_num);
		reg_val |= autoreq << USB_RX_AUTOREQ_SHIFT(ep_num);

		musb_writel(reg_base, USB_AUTOREQ_REG, reg_val);
		rx_ch->autoreq = autoreq;
	}
}
static void cppi41_set_ep_size(struct cppi41_channel *rx_ch, u32 pkt_size)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	void __iomem *reg_base = cppi->musb->ctrl_base;
	u8 ep_num = rx_ch->ch_num + 1;

	musb_writel(reg_base, USB_GENERIC_RNDIS_EP_SIZE_REG(ep_num), pkt_size);
}
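/*
 * The Generic RNDIS EP size register programmed above tells the
 * controller after how many bytes to close a packet even when no short
 * packet arrives (0x10000 max, per the discussion below); that is what
 * lets transfers ending exactly on a max-packet boundary complete
 * without a terminating short packet.
 */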
/*
 * Consider a 1KB bulk Rx buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes. How to do that with accelerated DMA modes?
 *
 * Rx queues in RNDIS mode (one single BD) handle (a) correctly but (b) loses
 * BADLY because nothing (!) happens when that second packet fills the buffer,
 * much less when a third one arrives -- which makes it not a "true" RNDIS mode.
 * In the RNDIS protocol short-packet termination is optional, and it's fine if
 * the peripherals (not hosts!) pad the messages out to end of buffer. Standard
 * PCI host controller DMA descriptors implement that mode by default... which
 *
 * Generic RNDIS mode is the only way to reliably make both cases work. This
 * mode is identical to the "normal" RNDIS mode except for the case where the
 * last packet of the segment matches the max USB packet size -- in this case,
 * the packet will be closed when a value (0x10000 max) in the Generic RNDIS
 * EP Size register is reached. This mode will work for the network drivers
 * (CDC/RNDIS) as well as for the mass storage drivers where there is no short
 * packet.
 *
 * BUT we can only use non-transparent modes when USB packet size is a multiple
 * of 64 bytes. Let's see what happens when this is not the case...
 *
 * Rx queues (2 BDs with 512 bytes each) have converse problems to RNDIS mode:
 * (b) is handled right but (a) loses badly. DMA doesn't stop after receiving
 * a short packet and processes both of those PDs; so both packets are loaded
 * into the buffer (with 212 byte gap between them), and the next buffer queued
 * will NOT get its 300 bytes of data. Even in the case when there should be
 * no short packets (URB_SHORT_NOT_OK is set), queueing several packets in the
 * host mode doesn't win us anything since we have to manually "prod" the Rx
 * process after each packet is received by setting ReqPkt bit in endpoint's
 * RXCSR; in the peripheral mode without short packets, queueing could be used
 * BUT we'll have to *teardown* the channel if a short packet still arrives in
 * the peripheral mode, and to "collect" the left-over packet descriptors from
 * the free descriptor/buffer queue in both cases...
 *
 * One BD at a time is the only way to make both cases work reliably, with
 * software handling both cases correctly, at the significant penalty of needing
 * an IRQ per packet. (The lack of I/O overlap can be slightly ameliorated by
 * enabling double buffering.)
 *
 * There seems to be no way to identify for sure the cases where the CDC mode
 * would be safe, so this driver sticks to the transparent and generic RNDIS
 * modes.
 */

/**
 * cppi41_next_rx_segment - DMA read for the next chunk of a buffer
 * @rx_ch: Rx channel
 *
 * Context: controller IRQ-locked
 *
 * NOTE: In the transparent mode, we have to queue one packet at a time since:
 * - we must avoid starting reception of another packet after receiving
 *   a short packet;
 * - in host mode we have to set ReqPkt bit in the endpoint's RXCSR after
 *   receiving each packet but the last one... ugly!
 */
static unsigned cppi41_next_rx_segment(struct cppi41_channel *rx_ch)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	struct usb_pkt_desc *curr_pd;
	struct cppi41_host_pkt_desc *hw_desc;
	u32 length = rx_ch->length - rx_ch->curr_offset;
	u32 pkt_size = rx_ch->pkt_size;
	u32 max_rx_transfer_size = 128 * 1024;
	u32 i, n_bd, pkt_len;
	struct usb_gadget_driver *gadget_driver;

	if (is_peripheral_active(cppi->musb)) {
		/* TODO: temporary fix for CDC/RNDIS which needs to be in
		 * GENERIC_RNDIS mode. Without this, the RNDIS gadget takes
		 * more than 2000 ms for 64 byte pings.
		 */
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
		gadget_driver = cppi->musb->gadget_driver;
#endif
		if (!strcmp(gadget_driver->driver.name, "g_ether")) {
			cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
		} else {
			max_rx_transfer_size = 512;
			cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
		}

		pkt_len = max_rx_transfer_size;
		if (rx_ch->length < max_rx_transfer_size)
			pkt_len = rx_ch->length;
		cppi41_set_ep_size(rx_ch, pkt_len);
	} else {
		/*
		 * Rx can use the generic RNDIS mode where we can
		 * probably fit this transfer in one PD and one IRQ
		 * (or two with a short packet).
		 */
		if ((pkt_size & 0x3f) == 0 && length >= 2 * pkt_size) {
			cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
			cppi41_autoreq_update(rx_ch, USB_AUTOREQ_ALL_BUT_EOP);

			if (likely(length < 0x10000))
				pkt_size = length - length % pkt_size;
			else
				pkt_size = 0x10000;
			cppi41_set_ep_size(rx_ch, pkt_size);
		} else {
			cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
			cppi41_autoreq_update(rx_ch, USB_NO_AUTOREQ);
		}
	}

	DBG(4, "RX DMA%u, %s, maxpkt %u, addr %#x, rec'd %u/%u\n",
	    rx_ch->ch_num, rx_ch->dma_mode ? "accelerated" : "transparent",
	    pkt_size, rx_ch->start_addr + rx_ch->curr_offset,
	    rx_ch->curr_offset, rx_ch->length);

	/* calculate number of BDs required */
	n_bd = (length + max_rx_transfer_size - 1) / max_rx_transfer_size;

	for (i = 0; i < n_bd; ++i) {
		/* Get Rx packet descriptor from the free pool */
		curr_pd = usb_get_free_pd(cppi);
		if (curr_pd == NULL) {
			/* Shouldn't ever happen! */
			DBG(4, "No Rx PDs\n");
			break;
		}

		pkt_len =
		    (length > max_rx_transfer_size) ? max_rx_transfer_size : length;

		hw_desc = &curr_pd->hw_desc;
		hw_desc->orig_buf_ptr = rx_ch->start_addr + rx_ch->curr_offset;
		hw_desc->orig_buf_len = pkt_len;

		curr_pd->ch_num = rx_ch->ch_num;
		curr_pd->ep_num = rx_ch->end_pt->epnum;

		curr_pd->eop = (length -= pkt_len) ? 0 : 1;
		rx_ch->curr_offset += pkt_len;

		/*
		 * Push the free Rx packet descriptor
		 * to the free descriptor/buffer queue.
		 */
		cppi41_queue_push(&rx_ch->queue_obj, curr_pd->dma_addr,
				  USB_CPPI41_DESC_ALIGN, 0);
	}

	/*
	 * HCD arranged ReqPkt for the first packet.
	 * We arrange it for all but the last one.
	 */
	if (is_host_active(cppi->musb) && rx_ch->channel.actual_len) {
		void __iomem *epio = rx_ch->end_pt->regs;
		u16 csr = musb_readw(epio, MUSB_RXCSR);

		csr |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* enable the scheduler if not enabled */
	if (is_peripheral_active(cppi->musb) && (n_bd > 0))
		cppi41_enable_sched_rx();

	return 1;
}
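/*
 * Worked example for the Rx BD math above (numbers illustrative): with
 * max_rx_transfer_size left at its 128 KB default, a 300 KB request
 * yields n_bd = (307200 + 131071) / 131072 = 3 descriptors covering
 * 128 KB, 128 KB and 44 KB; only the PD that exhausts the request has
 * eop set, marking the descriptor that ends the request.
 */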
/**
 * cppi41_channel_program - program channel for data transfer
 * @channel: the channel
 * @maxpacket: max packet size
 * @mode: for Rx, 1 unless the USB protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery;
 *	for Tx, 0 unless the protocol driver _requires_ short-packet
 *	termination mode
 * @dma_addr: DMA address of buffer
 * @length: length of buffer
 *
 * Context: controller IRQ-locked
 */
static int cppi41_channel_program(struct dma_channel *channel, u16 maxpacket,
				  u8 mode, dma_addr_t dma_addr, u32 length)
{
	struct cppi41_channel *cppi_ch;
	unsigned queued;

	cppi_ch = container_of(channel, struct cppi41_channel, channel);

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* Fault IRQ handler should have handled cleanup */
		WARNING("%cx DMA%d not cleaned up after abort!\n",
			cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("Program active channel? %cx DMA%d\n",
			cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cx DMA%d not allocated!\n",
		    cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Set the transfer parameters, then queue up the first segment */
	cppi_ch->start_addr = dma_addr;
	cppi_ch->curr_offset = 0;
	cppi_ch->pkt_size = maxpacket;
	cppi_ch->length = length;
	cppi_ch->transfer_mode = mode;
	cppi_ch->zlp_queued = 0;
	cppi_ch->channel.actual_len = 0;

	/* Tx or Rx channel? */
	if (cppi_ch->transmit)
		queued = cppi41_next_tx_segment(cppi_ch);
	else
		queued = cppi41_next_rx_segment(cppi_ch);

	return queued > 0;
}
static struct usb_pkt_desc *usb_get_pd_ptr(struct cppi41 *cppi,
					   unsigned long pd_addr)
{
	if (pd_addr >= cppi->pd_mem_phys && pd_addr < cppi->pd_mem_phys +
	    USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN)
		return pd_addr - cppi->pd_mem_phys + cppi->pd_mem;
	else
		return NULL;
}
static int usb_check_teardown(struct cppi41_channel *cppi_ch,
			      unsigned long pd_addr)
{
	u32 info;

	if (cppi41_get_teardown_info(pd_addr, &info)) {
		DBG(1, "ERROR: not a teardown descriptor\n");
		return 0;
	}

	if ((info & CPPI41_TEARDOWN_TX_RX_MASK) ==
	    (!cppi_ch->transmit << CPPI41_TEARDOWN_TX_RX_SHIFT) &&
	    (info & CPPI41_TEARDOWN_DMA_NUM_MASK) ==
	    (usb_cppi41_info.dma_block << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &&
	    (info & CPPI41_TEARDOWN_CHAN_NUM_MASK) ==
	    (usb_cppi41_info.ep_dma_ch[cppi_ch->ch_num] <<
	     CPPI41_TEARDOWN_CHAN_NUM_SHIFT))
		return 1;

	DBG(1, "ERROR: unexpected values in teardown descriptor\n");
	return 0;
}
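/*
 * The teardown descriptor's info word checked above encodes which
 * channel was torn down: a Tx/Rx flag (CPPI41_TEARDOWN_TX_RX_MASK), the
 * DMA block number (CPPI41_TEARDOWN_DMA_NUM_MASK) and the DMA channel
 * number (CPPI41_TEARDOWN_CHAN_NUM_MASK); usb_check_teardown() simply
 * compares all three fields against the channel it expected to tear
 * down.
 */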
/*
 * We can't handle the channel teardown via the default completion queue in
 * context of the controller IRQ-locked, so we use the dedicated teardown
 * completion queue which we can just poll for a teardown descriptor, not
 * interfering with the Tx completion queue processing.
 */
static void usb_tx_ch_teardown(struct cppi41_channel *tx_ch)
{
	struct cppi41 *cppi = tx_ch->channel.private_data;
	struct musb *musb = cppi->musb;
	void __iomem *reg_base = musb->ctrl_base;
	u32 td_reg, timeout = 0xfffff;
	u8 ep_num = tx_ch->ch_num + 1;
	unsigned long pd_addr;

	/* Initiate teardown for Tx DMA channel */
	cppi41_dma_ch_teardown(&tx_ch->dma_ch_obj);

	/* Wait for a descriptor to be queued and pop it... */
	do {
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_TX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		pd_addr = cppi41_queue_pop(&cppi->queue_obj);
	} while (!pd_addr && timeout--);

	if (pd_addr) {
		dprintk("Descriptor (%08lx) popped from teardown completion "
			"queue\n", pd_addr);

		if (usb_check_teardown(tx_ch, pd_addr)) {
			dprintk("Teardown Desc (%08lx) rcvd\n", pd_addr);
		} else
			ERR("Invalid PD(%08lx)popped from TearDn completion"
			    "queue\n", pd_addr);
	} else
		ERR("Teardown Desc not rcvd\n");
}
/*
 * For Rx DMA channels, the situation is more complex: there's only a single
 * completion queue for all our needs, so we have to temporarily redirect the
 * completed descriptors to our teardown completion queue, with a possibility
 * of a completed packet landing there as well...
 */
static void usb_rx_ch_teardown(struct cppi41_channel *rx_ch)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	u32 timeout = 0xfffff;

	cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, 0, cppi->teardownQNum);

	/* Initiate teardown for Rx DMA channel */
	cppi41_dma_ch_teardown(&rx_ch->dma_ch_obj);

	while (1) {
		struct usb_pkt_desc *curr_pd;
		unsigned long pd_addr;

		/* Wait for a descriptor to be queued and pop it... */
		do {
			pd_addr = cppi41_queue_pop(&cppi->queue_obj);
		} while (!pd_addr && timeout--);

		if (!pd_addr) {
			ERR("teardown Desc not found\n");
			break;
		}

		dprintk("Descriptor (%08lx) popped from teardown completion "
			"queue\n", pd_addr);

		/*
		 * We might have popped a completed Rx PD, so check if the
		 * physical address is within the PD region first. If it's
		 * not the case, it must be a teardown descriptor...
		 */
		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			if (usb_check_teardown(rx_ch, pd_addr))
				break;
			continue;
		}

		/* Paranoia: check if PD is from the right channel... */
		if (curr_pd->ch_num != rx_ch->ch_num) {
			ERR("Unexpected channel %d in Rx PD\n",
			    curr_pd->ch_num);
			continue;
		}

		/* Extract the buffer length from the completed PD */
		rx_ch->channel.actual_len += curr_pd->hw_desc.buf_len;

		/*
		 * Return Rx PDs to the software list --
		 * this is protected by critical section.
		 */
		usb_put_free_pd(cppi, curr_pd);
	}

	/* Now restore the default Rx completion queue... */
	cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, usb_cppi41_info.q_mgr,
				    usb_cppi41_info.rx_comp_q[0]);
}
/*
 * cppi41_channel_abort
 *
 * Context: controller IRQ-locked, endpoint selected.
 */
static int cppi41_channel_abort(struct dma_channel *channel)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	struct musb *musb;
	void __iomem *reg_base, *epio;
	unsigned long pd_addr;
	u32 td_reg;
	u16 csr;
	u8 ch_num, ep_num;

	cppi_ch = container_of(channel, struct cppi41_channel, channel);
	ch_num = cppi_ch->ch_num;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* From Rx or Tx fault IRQ handler */
	case MUSB_DMA_STATUS_BUSY:
		/* The hardware needs shutting down... */
		dprintk("%s: DMA busy, status = %x\n",
			__func__, channel->status);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cx DMA%d not allocated\n",
		    cppi_ch->transmit ? 'T' : 'R', ch_num);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		return 0;
	}

	cppi = cppi_ch->channel.private_data;
	musb = cppi->musb;
	reg_base = musb->ctrl_base;
	epio = cppi_ch->end_pt->regs;
	ep_num = ch_num + 1;

#ifdef DEBUG_CPPI_TD
	printk("Before teardown:");
	print_pd_list(cppi->pd_pool_head);
#endif

	if (cppi_ch->transmit) {
		dprintk("Tx channel teardown, cppi_ch = %p\n", cppi_ch);

		/* Tear down Tx DMA channel */
		usb_tx_ch_teardown(cppi_ch);

		/* Issue CPPI FIFO teardown for Tx channel */
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_TX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		/* Flush FIFO of the endpoint */
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_TXCSR, csr);
		musb_writew(epio, MUSB_TXCSR, csr);
	} else { /* Rx */
		dprintk("Rx channel teardown, cppi_ch = %p\n", cppi_ch);

		/* Flush FIFO of the endpoint */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* Issue CPPI FIFO teardown for Rx channel */
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_RX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		/* Tear down Rx DMA channel */
		usb_rx_ch_teardown(cppi_ch);

		/*
		 * NOTE: docs don't guarantee any of this works... we expect
		 * that if the USB core stops telling the CPPI core to pull
		 * more data from it, then it'll be safe to flush current Rx
		 * DMA state iff any pending FIFO transfer is done.
		 */

		/* For host, ensure ReqPkt is never set again */
		cppi41_autoreq_update(cppi_ch, USB_NO_AUTOREQ);

		/* For host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi->musb))
			csr &= ~MUSB_RXCSR_H_REQPKT;
		csr |= MUSB_RXCSR_H_WZC_BITS;

		/* Clear DMA enable */
		csr &= ~MUSB_RXCSR_DMAENAB;
		musb_writew(epio, MUSB_RXCSR, csr);

		/* Flush the FIFO of endpoint once again */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/*
	 * There might be PDs in the Rx/Tx source queue that were not consumed
	 * by the DMA controller -- they need to be recycled properly.
	 */
	while ((pd_addr = cppi41_queue_pop(&cppi_ch->queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from source queue\n");
			continue;
		}

		/*
		 * Return Rx/Tx PDs to the software list --
		 * this is protected by critical section.
		 */
		dprintk("Returning PD %p to the free PD list\n", curr_pd);
		usb_put_free_pd(cppi, curr_pd);
	}

#ifdef DEBUG_CPPI_TD
	printk("After teardown:");
	print_pd_list(cppi->pd_pool_head);
#endif

	/* Re-enable the DMA channel */
	cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

	channel->status = MUSB_DMA_STATUS_FREE;

	return 0;
}
/**
 * dma_controller_create - instantiate an object representing DMA controller.
 */
struct dma_controller * __init dma_controller_create(struct musb *musb,
						     void __iomem *mregs)
{
	struct cppi41 *cppi;

	cppi = kzalloc(sizeof *cppi, GFP_KERNEL);
	if (!cppi)
		return NULL;

	/* Initialize the CPPI 4.1 DMA controller structure */
	cppi->musb = musb;
	cppi->controller.start = cppi41_controller_start;
	cppi->controller.stop = cppi41_controller_stop;
	cppi->controller.channel_alloc = cppi41_channel_alloc;
	cppi->controller.channel_release = cppi41_channel_release;
	cppi->controller.channel_program = cppi41_channel_program;
	cppi->controller.channel_abort = cppi41_channel_abort;

	return &cppi->controller;
}
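/*
 * Lifecycle note (assuming the usual musb probe sequence): the musb
 * core is expected to call dma_controller_create() during controller
 * probe, invoke the start/stop hooks wired up above, and finally call
 * dma_controller_destroy() below to undo the kzalloc() when the glue
 * layer goes away.
 */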
/**
 * dma_controller_destroy - destroy a previously instantiated DMA controller
 * @controller: the controller
 */
void dma_controller_destroy(struct dma_controller *controller)
{
	struct cppi41 *cppi;

	cppi = container_of(controller, struct cppi41, controller);

	/* Free the CPPI object */
	kfree(cppi);
}
static void usb_process_tx_queue(struct cppi41 *cppi, unsigned index)
{
	struct cppi41_queue_obj tx_queue_obj;
	unsigned long pd_addr;

	if (cppi41_queue_init(&tx_queue_obj, usb_cppi41_info.q_mgr,
			      usb_cppi41_info.tx_comp_q[index])) {
		DBG(1, "ERROR: cppi41_queue_init failed for "
		    "Tx completion queue");
		return;
	}

	while ((pd_addr = cppi41_queue_pop(&tx_queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;
		struct cppi41_channel *tx_ch;
		u8 ch_num, ep_num;
		u32 length;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from Tx completion queue\n");
			continue;
		}

		/* Extract the data from received packet descriptor */
		ch_num = curr_pd->ch_num;
		ep_num = curr_pd->ep_num;
		length = curr_pd->hw_desc.buf_len;

		tx_ch = &cppi->tx_cppi_ch[ch_num];
		tx_ch->channel.actual_len += length;

		/*
		 * Return Tx PD to the software list --
		 * this is protected by critical section
		 */
		usb_put_free_pd(cppi, curr_pd);

		if ((tx_ch->curr_offset < tx_ch->length) ||
		    (tx_ch->transfer_mode && !tx_ch->zlp_queued))
			cppi41_next_tx_segment(tx_ch);
		else if (tx_ch->channel.actual_len >= tx_ch->length) {
			tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			/* Tx completion routine callback */
			musb_dma_completion(cppi->musb, ep_num, 1);
		}
	}
}
static void usb_process_rx_queue(struct cppi41 *cppi, unsigned index)
{
	struct cppi41_queue_obj rx_queue_obj;
	unsigned long pd_addr;

	if (cppi41_queue_init(&rx_queue_obj, usb_cppi41_info.q_mgr,
			      usb_cppi41_info.rx_comp_q[index])) {
		DBG(1, "ERROR: cppi41_queue_init failed for Rx queue\n");
		return;
	}

	while ((pd_addr = cppi41_queue_pop(&rx_queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;
		struct cppi41_channel *rx_ch;
		u8 ch_num, ep_num;
		u32 length;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from Rx completion queue\n");
			continue;
		}

		/* Extract the data from received packet descriptor */
		ch_num = curr_pd->ch_num;
		ep_num = curr_pd->ep_num;
		length = curr_pd->hw_desc.buf_len;

		rx_ch = &cppi->rx_cppi_ch[ch_num];
		rx_ch->channel.actual_len += length;

		if (curr_pd->eop) {
			/* disable the Rx DMA scheduler */
			if (is_peripheral_active(cppi->musb)) {
				cppi41_disable_sched_rx();
				musb_dma_completion(cppi->musb, ep_num, 0);
			}
		}

		/*
		 * Return Rx PD to the software list --
		 * this is protected by critical section
		 */
		usb_put_free_pd(cppi, curr_pd);

		if (unlikely(rx_ch->channel.actual_len >= rx_ch->length ||
			     length < curr_pd->hw_desc.orig_buf_len)) {
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			/* Rx completion routine callback */
			musb_dma_completion(cppi->musb, ep_num, 0);
		} else {
			if (is_peripheral_active(cppi->musb) &&
			    ((rx_ch->length - rx_ch->curr_offset) > 0))
				cppi41_next_rx_segment(rx_ch);
		}
	}
}
/*
 * cppi41_completion - handle interrupts from the Tx/Rx completion queues
 *
 * NOTE: since we have to manually prod the Rx process in the transparent mode,
 * we certainly want to handle the Rx queues first.
 */
void cppi41_completion(struct musb *musb, u32 rx, u32 tx)
{
	struct cppi41 *cppi;
	unsigned index;

	cppi = container_of(musb->dma_controller, struct cppi41, controller);

	/* Process packet descriptors from the Rx queues */
	for (index = 0; rx != 0; rx >>= 1, index++)
		if (rx & 1)
			usb_process_rx_queue(cppi, index);

	/* Process packet descriptors from the Tx completion queues */
	for (index = 0; tx != 0; tx >>= 1, index++)
		if (tx & 1)
			usb_process_tx_queue(cppi, index);
}