/*
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (c) 2008, MontaVista Software, Inc. <source@mvista.com>
 *
 * This file implements a DMA interface using TI's CPPI 4.1 DMA.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */

#include <linux/errno.h>
#include <linux/dma-mapping.h>

#include "cppi41.h"

#include "musb_core.h"
#include "musb_dma.h"
#include "cppi41_dma.h"

/* Configuration */
#define USB_CPPI41_DESC_SIZE_SHIFT 6
#define USB_CPPI41_DESC_ALIGN	(1 << USB_CPPI41_DESC_SIZE_SHIFT)
#define USB_CPPI41_CH_NUM_PD	64	/* 4K bulk data at full speed */
#define USB_CPPI41_MAX_PD	(USB_CPPI41_CH_NUM_PD * USB_CPPI41_NUM_CH)

#undef DEBUG_CPPI_TD
#undef USBDRV_DEBUG

#ifdef USBDRV_DEBUG
#define dprintk(x, ...)		printk(x, ## __VA_ARGS__)
#else
#define dprintk(x, ...)
#endif

/*
 * Data structure definitions
 */

/*
 * USB Packet Descriptor
 */
struct usb_pkt_desc;

struct usb_pkt_desc {
	/* Hardware descriptor fields from this point */
	struct cppi41_host_pkt_desc hw_desc;
	/* Protocol specific data */
	dma_addr_t dma_addr;
	struct usb_pkt_desc *next_pd_ptr;
	u8 ch_num;
	u8 ep_num;
	u8 eop;
};

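/*
 * NOTE: each usb_pkt_desc occupies one USB_CPPI41_DESC_ALIGN (64-byte) slot
 * of the coherent descriptor pool; the hardware-visible host packet
 * descriptor sits at the start of the slot, and dma_addr records the slot's
 * own physical address so that a descriptor popped from a queue can be
 * mapped back to its virtual counterpart (see usb_get_pd_ptr()).
 */
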
/**
 * struct cppi41_channel - DMA Channel Control Structure
 *
 * Using the same for Tx/Rx.
 */
struct cppi41_channel {
	struct dma_channel channel;

	struct cppi41_dma_ch_obj dma_ch_obj;	/* DMA channel object */
	struct cppi41_queue src_queue;		/* Tx queue or Rx free
						 * descriptor/buffer queue */
	struct cppi41_queue_obj queue_obj;	/* Tx queue object or Rx free
						 * descriptor/buffer queue
						 * object */

	u32 tag_info;			/* Tx PD Tag Information field */

	/* Which direction of which endpoint? */
	struct musb_hw_ep *end_pt;
	u8 transmit;
	u8 ch_num;			/* Channel number of Tx/Rx 0..3 */

	/* DMA mode: "transparent", RNDIS, CDC, or Generic RNDIS */
	u8 dma_mode;
	u8 autoreq;

	/* Book keeping for the current transfer request */
	dma_addr_t start_addr;
	u32 length;
	u32 curr_offset;
	u16 pkt_size;
	u8 transfer_mode;
	u8 zlp_queued;
};

/**
 * struct cppi41 - CPPI 4.1 DMA Controller Object
 *
 * Encapsulates all book keeping and data structures pertaining to
 * the CPPI 4.1 DMA controller.
 */
struct cppi41 {
	struct dma_controller controller;
	struct musb *musb;

	struct cppi41_channel tx_cppi_ch[USB_CPPI41_NUM_CH];
	struct cppi41_channel rx_cppi_ch[USB_CPPI41_NUM_CH];

	struct usb_pkt_desc *pd_pool_head; /* Free PD pool head */
	dma_addr_t pd_mem_phys;		/* PD memory physical address */
	void *pd_mem;			/* PD memory pointer */
	u8 pd_mem_rgn;			/* PD memory region number */

	u16 teardownQNum;		/* Teardown completion queue number */
	struct cppi41_queue_obj queue_obj; /* Teardown completion queue
					    * object */
	u32 pkt_info;			/* Tx PD Packet Information field */
};

#ifdef DEBUG_CPPI_TD
static void print_pd_list(struct usb_pkt_desc *pd_pool_head)
{
	struct usb_pkt_desc *curr_pd = pd_pool_head;
	int cnt = 0;

	while (curr_pd != NULL) {
		if (cnt % 8 == 0)
			dprintk("\n%02x ", cnt);
		cnt++;
		dprintk(" %p", curr_pd);
		curr_pd = curr_pd->next_pd_ptr;
	}
	dprintk("\n");
}
#endif

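/*
 * Free packet descriptor pool helpers.  PDs carved out of the coherent
 * descriptor memory are kept on a simple singly-linked LIFO list headed by
 * cppi->pd_pool_head; callers are expected to run with the controller
 * IRQ-locked (see the "protected by critical section" notes below), so no
 * additional locking is done here.
 */
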
static struct usb_pkt_desc *usb_get_free_pd(struct cppi41 *cppi)
{
	struct usb_pkt_desc *free_pd = cppi->pd_pool_head;

	if (free_pd != NULL) {
		cppi->pd_pool_head = free_pd->next_pd_ptr;
		free_pd->next_pd_ptr = NULL;
	}
	return free_pd;
}

static void usb_put_free_pd(struct cppi41 *cppi, struct usb_pkt_desc *free_pd)
{
	free_pd->next_pd_ptr = cppi->pd_pool_head;
	cppi->pd_pool_head = free_pd;
}

/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels.
 */
static int __init cppi41_controller_start(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	void __iomem *reg_base;
	struct usb_pkt_desc *curr_pd;
	unsigned long pd_addr;
	int i;

	cppi = container_of(controller, struct cppi41, controller);

	/*
	 * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
	 * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
	 * Similarly, the descriptor size should also be a multiple of 32.
	 */

	/*
	 * Allocate free packet descriptor pool for all Tx/Rx endpoints --
	 * dma_alloc_coherent() will return a page aligned address, so our
	 * alignment requirement will be honored.
	 */
	cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
					  USB_CPPI41_MAX_PD *
					  USB_CPPI41_DESC_ALIGN,
					  &cppi->pd_mem_phys,
					  GFP_KERNEL | GFP_DMA);
	if (cppi->pd_mem == NULL) {
		DBG(1, "ERROR: packet descriptor memory allocation failed\n");
		return 0;
	}
	if (cppi41_mem_rgn_alloc(usb_cppi41_info.q_mgr, cppi->pd_mem_phys,
				 USB_CPPI41_DESC_SIZE_SHIFT,
				 get_count_order(USB_CPPI41_MAX_PD),
				 &cppi->pd_mem_rgn)) {
		DBG(1, "ERROR: queue manager memory region allocation "
		    "failed\n");
		goto free_pds;
	}

	/* Allocate the teardown completion queue */
	if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
			       0, &cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue allocation failed\n");
		goto free_mem_rgn;
	}
	DBG(4, "Allocated teardown completion queue %d in queue manager 0\n",
	    cppi->teardownQNum);

	if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue initialization "
		    "failed\n");
		goto free_queue;
	}

	/*
	 * "Slice" PDs one-by-one from the big chunk and
	 * add them to the free pool.
	 */
	curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
	pd_addr = cppi->pd_mem_phys;
	for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
		curr_pd->dma_addr = pd_addr;

		usb_put_free_pd(cppi, curr_pd);
		curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
						  USB_CPPI41_DESC_ALIGN);
		pd_addr += USB_CPPI41_DESC_ALIGN;
	}

	/* Configure the Tx channels */
	for (i = 0, cppi_ch = cppi->tx_cppi_ch;
	     i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
		const struct cppi41_tx_ch *tx_info;

		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->transmit = 1;
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;

		/*
		 * Extract the CPPI 4.1 DMA Tx channel configuration and
		 * construct/store the Tx PD tag info field for later use...
		 */
		tx_info = cppi41_dma_block[usb_cppi41_info.dma_block].tx_ch_info
			  + usb_cppi41_info.ep_dma_ch[i];
		cppi_ch->src_queue = tx_info->tx_queue[0];
		cppi_ch->tag_info = (tx_info->port_num <<
				     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
				    (tx_info->ch_num <<
				     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
				    (tx_info->sub_ch_num <<
				     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
	}

	/* Configure the Rx channels */
	for (i = 0, cppi_ch = cppi->rx_cppi_ch;
	     i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;
	}

	/* Construct/store Tx PD packet info field for later use */
	cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
			 (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT) |
			 (usb_cppi41_info.q_mgr << CPPI41_RETURN_QMGR_SHIFT) |
			 (usb_cppi41_info.tx_comp_q[0] <<
			  CPPI41_RETURN_QNUM_SHIFT);

	/* Do the necessary configuration in hardware to get started */
	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, USB_AUTOREQ_REG, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;

free_queue:
	if (cppi41_queue_free(0, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

free_mem_rgn:
	if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

free_pds:
	dma_free_coherent(cppi->musb->controller,
			  USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
			  cppi->pd_mem, cppi->pd_mem_phys);

	return 0;
}

/**
 * cppi41_controller_stop - stop DMA controller
 * @controller: the controller
 *
 * De-initialize the DMA controller as necessary.
 */
static int cppi41_controller_stop(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	void __iomem *reg_base;

	cppi = container_of(controller, struct cppi41, controller);

	/*
	 * Pop all the teardown descriptors queued to the teardown completion
	 * queue.
	 */
	cppi41_free_teardown_queue(0);

	/* Free the teardown completion queue */
	if (cppi41_queue_free(usb_cppi41_info.q_mgr, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

	/*
	 * Free the packet descriptor region allocated
	 * for all Tx/Rx channels.
	 */
	if (cppi41_mem_rgn_free(usb_cppi41_info.q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

	dma_free_coherent(cppi->musb->controller,
			  USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
			  cppi->pd_mem, cppi->pd_mem_phys);

	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, USB_AUTOREQ_REG, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;
}

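/*
 * The channel alloc/program/abort/release hooks below are wired into the
 * struct dma_controller created in dma_controller_create().  Roughly, the
 * MUSB core allocates a channel per endpoint direction, programs it once per
 * transfer, gets notified through cppi41_completion(), and uses abort to
 * tear down an in-progress transfer (see musb_dma.h for the contract).
 */
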
/**
 * cppi41_channel_alloc - allocate a CPPI channel for DMA.
 * @controller: the controller
 * @ep: the endpoint
 * @is_tx: 1 for Tx channel, 0 for Rx channel
 *
 * With CPPI, channels are bound to each transfer direction of a non-control
 * endpoint, so allocating (and deallocating) is mostly a way to notice bad
 * housekeeping on the software side.  We assume the IRQs are always active.
 */
static struct dma_channel *cppi41_channel_alloc(struct dma_controller
						*controller,
						struct musb_hw_ep *ep, u8 is_tx)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	u32 ch_num, ep_num = ep->epnum;

	cppi = container_of(controller, struct cppi41, controller);

	/* Remember, ep_num: 1 .. Max_EP, and CPPI ch_num: 0 .. Max_EP - 1 */
	ch_num = ep_num - 1;

	if (ep_num > USB_CPPI41_NUM_CH) {
		DBG(1, "No %cx DMA channel for EP%d\n",
		    is_tx ? 'T' : 'R', ep_num);
		return NULL;
	}

	cppi_ch = (is_tx ? cppi->tx_cppi_ch : cppi->rx_cppi_ch) + ch_num;

	/* As of now, just return the corresponding CPPI 4.1 channel handle */
	if (is_tx) {
		/* Initialize the CPPI 4.1 Tx DMA channel */
		if (cppi41_tx_ch_init(&cppi_ch->dma_ch_obj,
				      usb_cppi41_info.dma_block,
				      usb_cppi41_info.ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_tx_ch_init failed for "
			    "channel %d\n", ch_num);
			return NULL;
		}
		/*
		 * Teardown descriptors will be pushed to the dedicated
		 * completion queue.
		 */
		cppi41_dma_ch_default_queue(&cppi_ch->dma_ch_obj,
					    0, cppi->teardownQNum);
	} else {
		struct cppi41_rx_ch_cfg rx_cfg;
		u8 q_mgr = usb_cppi41_info.q_mgr;
		int i;

		/* Initialize the CPPI 4.1 Rx DMA channel */
		if (cppi41_rx_ch_init(&cppi_ch->dma_ch_obj,
				      usb_cppi41_info.dma_block,
				      usb_cppi41_info.ep_dma_ch[ch_num])) {
			DBG(1, "ERROR: cppi41_rx_ch_init failed\n");
			return NULL;
		}

		if (cppi41_queue_alloc(CPPI41_FREE_DESC_BUF_QUEUE |
				       CPPI41_UNASSIGNED_QUEUE,
				       q_mgr, &cppi_ch->src_queue.q_num)) {
			DBG(1, "ERROR: cppi41_queue_alloc failed for "
			    "free descriptor/buffer queue\n");
			return NULL;
		}
		DBG(4, "Allocated free descriptor/buffer queue %d in "
		    "queue manager %d\n", cppi_ch->src_queue.q_num, q_mgr);

		rx_cfg.default_desc_type = cppi41_rx_host_desc;
		rx_cfg.sop_offset = 0;
		rx_cfg.retry_starved = 1;
		rx_cfg.rx_queue.q_mgr = cppi_ch->src_queue.q_mgr = q_mgr;
		rx_cfg.rx_queue.q_num = usb_cppi41_info.rx_comp_q[0];
		for (i = 0; i < 4; i++)
			rx_cfg.cfg.host_pkt.fdb_queue[i] = cppi_ch->src_queue;
		cppi41_rx_ch_configure(&cppi_ch->dma_ch_obj, &rx_cfg);
	}

	/* Initialize the CPPI 4.1 DMA source queue */
	if (cppi41_queue_init(&cppi_ch->queue_obj, cppi_ch->src_queue.q_mgr,
			      cppi_ch->src_queue.q_num)) {
		DBG(1, "ERROR: cppi41_queue_init failed for %s queue",
		    is_tx ? "Tx" : "Rx free descriptor/buffer");
		if (is_tx == 0 &&
		    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
				      cppi_ch->src_queue.q_num))
			DBG(1, "ERROR: failed to free Rx descriptor/buffer "
			    "queue\n");
		return NULL;
	}

	/* Enable the DMA channel */
	cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

	if (cppi_ch->end_pt)
		DBG(1, "Re-allocating DMA %cx channel %d (%p)\n",
		    is_tx ? 'T' : 'R', ch_num, cppi_ch);

	cppi_ch->end_pt = ep;
	cppi_ch->ch_num = ch_num;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocated DMA %cx channel %d for EP%d\n", is_tx ? 'T' : 'R',
	    ch_num, ep_num);

	return &cppi_ch->channel;
}

/**
 * cppi41_channel_release - release a CPPI DMA channel
 * @channel: the channel
 */
static void cppi41_channel_release(struct dma_channel *channel)
{
	struct cppi41_channel *cppi_ch;

	/* REVISIT: for paranoia, check state and abort if needed... */
	cppi_ch = container_of(channel, struct cppi41_channel, channel);
	if (cppi_ch->end_pt == NULL)
		DBG(1, "Releasing idle DMA channel %p\n", cppi_ch);

	/* But for now, not its IRQ */
	cppi_ch->end_pt = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	cppi41_dma_ch_disable(&cppi_ch->dma_ch_obj);

	/* De-allocate Rx free descriptor/buffer queue */
	if (cppi_ch->transmit == 0 &&
	    cppi41_queue_free(cppi_ch->src_queue.q_mgr,
			      cppi_ch->src_queue.q_num))
		DBG(1, "ERROR: failed to free Rx descriptor/buffer queue\n");
}

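/*
 * Program the per-endpoint field of the USB_TX/RX_MODE register so that the
 * endpoint uses the requested DMA mode (transparent, RNDIS, CDC or Generic
 * RNDIS).  The last value written is cached in dma_mode so that redundant
 * register accesses are skipped.
 */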
static void cppi41_mode_update(struct cppi41_channel *cppi_ch, u8 mode)
{
	if (mode != cppi_ch->dma_mode) {
		struct cppi41 *cppi = cppi_ch->channel.private_data;
		void __iomem *reg_base = cppi->musb->ctrl_base;
		u32 reg_val;
		u8 ep_num = cppi_ch->ch_num + 1;

		if (cppi_ch->transmit) {
			reg_val = musb_readl(reg_base, USB_TX_MODE_REG);
			reg_val &= ~USB_TX_MODE_MASK(ep_num);
			reg_val |= mode << USB_TX_MODE_SHIFT(ep_num);
			musb_writel(reg_base, USB_TX_MODE_REG, reg_val);
		} else {
			reg_val = musb_readl(reg_base, USB_RX_MODE_REG);
			reg_val &= ~USB_RX_MODE_MASK(ep_num);
			reg_val |= mode << USB_RX_MODE_SHIFT(ep_num);
			musb_writel(reg_base, USB_RX_MODE_REG, reg_val);
		}
		cppi_ch->dma_mode = mode;
	}
}

/*
 * CPPI 4.1 Tx:
 * ============
 * Tx is a lot more reasonable than Rx: RNDIS mode seems to behave well except
 * for how it handles the exactly-N-packets case.  It appears that there's a
 * hiccup in that case (maybe the DMA completes before a ZLP gets written?)
 * boiling down to not being able to rely on the XFER DMA writing any
 * terminating zero length packet before the next transfer is started...
 *
 * The generic RNDIS mode does not have this misfeature, so we prefer using it
 * instead.  We then send the terminating ZLP *explicitly* using DMA instead of
 * doing it by PIO after an IRQ.
 */

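/*
 * A worked example of the segmentation done by cppi41_next_tx_segment()
 * below: a 1000-byte transfer with a 512-byte maxpacket satisfies the
 * generic RNDIS constraints (maxpacket is a multiple of 64 and the transfer
 * is longer than one packet), so it is queued as a single PD covering all
 * 1000 bytes and completes with one IRQ.  With a 60-byte maxpacket the
 * constraints fail, so transparent mode is used with
 * DIV_ROUND_UP(1000, 60) = 17 PDs, one per USB packet.  If the transfer
 * length is an exact multiple of maxpacket and short-packet termination was
 * requested (transfer_mode set), one extra zero-length PD is queued for the
 * terminating ZLP.
 */
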
/**
 * cppi41_next_tx_segment - DMA write for the next chunk of a buffer
 * @tx_ch:	Tx channel
 *
 * Context: controller IRQ-locked
 */
static unsigned cppi41_next_tx_segment(struct cppi41_channel *tx_ch)
{
	struct cppi41 *cppi = tx_ch->channel.private_data;
	struct usb_pkt_desc *curr_pd;
	u32 length = tx_ch->length - tx_ch->curr_offset;
	u32 pkt_size = tx_ch->pkt_size;
	unsigned num_pds, n;

	/*
	 * Tx can use the generic RNDIS mode where we can probably fit this
	 * transfer in one PD and one IRQ.  The only time we would NOT want
	 * to use it is when the hardware constraints prevent it...
	 */
	if ((pkt_size & 0x3f) == 0 && length > pkt_size) {
		num_pds = 1;
		pkt_size = length;
		cppi41_mode_update(tx_ch, USB_GENERIC_RNDIS_MODE);
	} else {
		num_pds = (length + pkt_size - 1) / pkt_size;
		cppi41_mode_update(tx_ch, USB_TRANSPARENT_MODE);
	}

	/*
	 * If the length of the transmit buffer is 0 or a multiple of the
	 * endpoint size, then send the zero length packet.
	 */
	if (!length || (tx_ch->transfer_mode && length % pkt_size == 0))
		num_pds++;

	DBG(4, "TX DMA%u, %s, maxpkt %u, %u PDs, addr %#x, len %u\n",
	    tx_ch->ch_num, tx_ch->dma_mode ? "accelerated" : "transparent",
	    pkt_size, num_pds, tx_ch->start_addr + tx_ch->curr_offset, length);

	for (n = 0; n < num_pds; n++) {
		struct cppi41_host_pkt_desc *hw_desc;

		/* Get Tx host packet descriptor from the free pool */
		curr_pd = usb_get_free_pd(cppi);
		if (curr_pd == NULL) {
			DBG(1, "No Tx PDs\n");
			break;
		}

		if (length < pkt_size)
			pkt_size = length;

		hw_desc = &curr_pd->hw_desc;
		hw_desc->desc_info = (CPPI41_DESC_TYPE_HOST <<
				      CPPI41_DESC_TYPE_SHIFT) | pkt_size;
		hw_desc->tag_info = tx_ch->tag_info;
		hw_desc->pkt_info = cppi->pkt_info;

		hw_desc->buf_ptr = tx_ch->start_addr + tx_ch->curr_offset;
		hw_desc->buf_len = pkt_size;
		hw_desc->next_desc_ptr = 0;

		curr_pd->ch_num = tx_ch->ch_num;
		curr_pd->ep_num = tx_ch->end_pt->epnum;

		tx_ch->curr_offset += pkt_size;
		length -= pkt_size;

		if (pkt_size == 0)
			tx_ch->zlp_queued = 1;

		DBG(5, "TX PD %p: buf %08x, len %08x, pkt info %08x\n", curr_pd,
		    hw_desc->buf_ptr, hw_desc->buf_len, hw_desc->pkt_info);

		cppi41_queue_push(&tx_ch->queue_obj, curr_pd->dma_addr,
				  USB_CPPI41_DESC_ALIGN, pkt_size);
	}

	return n;
}

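/*
 * Update the per-endpoint AUTOREQ field in USB_AUTOREQ_REG.  Auto request is
 * only relevant in host mode, where it lets the controller request further
 * packets without software setting ReqPkt for each one; the last value
 * programmed is cached in rx_ch->autoreq to avoid redundant register writes.
 */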
static void cppi41_autoreq_update(struct cppi41_channel *rx_ch, u8 autoreq)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;

	if (is_host_active(cppi->musb) &&
	    autoreq != rx_ch->autoreq) {
		void __iomem *reg_base = cppi->musb->ctrl_base;
		u32 reg_val = musb_readl(reg_base, USB_AUTOREQ_REG);
		u8 ep_num = rx_ch->ch_num + 1;

		reg_val &= ~USB_RX_AUTOREQ_MASK(ep_num);
		reg_val |= autoreq << USB_RX_AUTOREQ_SHIFT(ep_num);

		musb_writel(reg_base, USB_AUTOREQ_REG, reg_val);
		rx_ch->autoreq = autoreq;
	}
}

static void cppi41_set_ep_size(struct cppi41_channel *rx_ch, u32 pkt_size)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	void __iomem *reg_base = cppi->musb->ctrl_base;
	u8 ep_num = rx_ch->ch_num + 1;

	musb_writel(reg_base, USB_GENERIC_RNDIS_EP_SIZE_REG(ep_num), pkt_size);
}

/*
 * CPPI 4.1 Rx:
 * ============
 * Consider a 1KB bulk Rx buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with accelerated DMA modes?
 *
 * Rx queues in RNDIS mode (one single BD) handle (a) correctly but (b) loses
 * BADLY because nothing (!) happens when that second packet fills the buffer,
 * much less when a third one arrives -- which makes it not a "true" RNDIS
 * mode.  In the RNDIS protocol short-packet termination is optional, and it's
 * fine if the peripherals (not hosts!) pad the messages out to end of buffer.
 * Standard PCI host controller DMA descriptors implement that mode by
 * default... which is no accident.
 *
 * Generic RNDIS mode is the only way to reliably make both cases work.  This
 * mode is identical to the "normal" RNDIS mode except for the case where the
 * last packet of the segment matches the max USB packet size -- in this case,
 * the packet will be closed when a value (0x10000 max) in the Generic RNDIS
 * EP Size register is reached.  This mode will work for the network drivers
 * (CDC/RNDIS) as well as for the mass storage drivers where there is no short
 * packet.
 *
 * BUT we can only use the non-transparent modes when the USB packet size is a
 * multiple of 64 bytes.  Let's see what happens when this is not the case...
 *
 * Rx queues (2 BDs with 512 bytes each) have converse problems to RNDIS mode:
 * (b) is handled right but (a) loses badly.  DMA doesn't stop after receiving
 * a short packet and processes both of those PDs; so both packets are loaded
 * into the buffer (with a 212 byte gap between them), and the next buffer
 * queued will NOT get its 300 bytes of data.  Even in the case when there
 * should be no short packets (URB_SHORT_NOT_OK is set), queueing several
 * packets in the host mode doesn't win us anything since we have to manually
 * "prod" the Rx process after each packet is received by setting the ReqPkt
 * bit in the endpoint's RXCSR; in the peripheral mode without short packets,
 * queueing could be used BUT we'll have to *teardown* the channel if a short
 * packet still arrives in the peripheral mode, and to "collect" the left-over
 * packet descriptors from the free descriptor/buffer queue in both cases...
 *
 * One BD at a time is the only way to make both cases work reliably, with
 * software handling both cases correctly, at the significant penalty of
 * needing an IRQ per packet.  (The lack of I/O overlap can be slightly
 * ameliorated by enabling double buffering.)
 *
 * There seems to be no way to identify for sure the cases where the CDC mode
 * is appropriate...
 */

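/*
 * Summary of how cppi41_next_rx_segment() below applies the above: in host
 * mode, generic RNDIS plus AUTOREQ_ALL_BUT_EOP is used when maxpacket is a
 * multiple of 64 and at least two packets remain (the Generic RNDIS EP Size
 * register is programmed with the remaining length rounded down to a packet
 * boundary, capped at 0x10000); otherwise transparent mode with no auto
 * request is used.  In peripheral mode, generic RNDIS is currently forced
 * only for the g_ether gadget; everything else runs transparent with
 * 512-byte segments.
 */
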
/**
 * cppi41_next_rx_segment - DMA read for the next chunk of a buffer
 * @rx_ch:	Rx channel
 *
 * Context: controller IRQ-locked
 *
 * NOTE: In the transparent mode, we have to queue one packet at a time since:
 *  - we must avoid starting reception of another packet after receiving
 *    a short packet;
 *  - in host mode we have to set ReqPkt bit in the endpoint's RXCSR after
 *    receiving each packet but the last one... ugly!
 */
static unsigned cppi41_next_rx_segment(struct cppi41_channel *rx_ch)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	struct usb_pkt_desc *curr_pd;
	struct cppi41_host_pkt_desc *hw_desc;
	u32 length = rx_ch->length - rx_ch->curr_offset;
	u32 pkt_size = rx_ch->pkt_size;
	u32 max_rx_transfer_size = 128 * 1024;
	u32 i, n_bd, pkt_len;
	struct usb_gadget_driver *gadget_driver = NULL;

	if (is_peripheral_active(cppi->musb)) {
		/*
		 * TODO: temporary fix for CDC/RNDIS which needs to be in
		 * GENERIC_RNDIS mode.  Without this, the RNDIS gadget takes
		 * more than 2000 ms for 64-byte pings.
		 */
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
		gadget_driver = cppi->musb->gadget_driver;
#endif
		if (gadget_driver &&
		    !strcmp(gadget_driver->driver.name, "g_ether")) {
			cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
		} else {
			max_rx_transfer_size = 512;
			cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
		}

		pkt_len = 0;
		if (rx_ch->length < max_rx_transfer_size)
			pkt_len = rx_ch->length;
		cppi41_set_ep_size(rx_ch, pkt_len);
	} else {
		/*
		 * Rx can use the generic RNDIS mode where we can
		 * probably fit this transfer in one PD and one IRQ
		 * (or two with a short packet).
		 */
		if ((pkt_size & 0x3f) == 0 && length >= 2 * pkt_size) {
			cppi41_mode_update(rx_ch, USB_GENERIC_RNDIS_MODE);
			cppi41_autoreq_update(rx_ch, USB_AUTOREQ_ALL_BUT_EOP);

			if (likely(length < 0x10000))
				pkt_size = length - length % pkt_size;
			else
				pkt_size = 0x10000;
			cppi41_set_ep_size(rx_ch, pkt_size);
		} else {
			cppi41_mode_update(rx_ch, USB_TRANSPARENT_MODE);
			cppi41_autoreq_update(rx_ch, USB_NO_AUTOREQ);
		}
	}

	DBG(4, "RX DMA%u, %s, maxpkt %u, addr %#x, rec'd %u/%u\n",
	    rx_ch->ch_num, rx_ch->dma_mode ? "accelerated" : "transparent",
	    pkt_size, rx_ch->start_addr + rx_ch->curr_offset,
	    rx_ch->curr_offset, rx_ch->length);

	/* Calculate the number of buffer descriptors required */
	n_bd = (length + max_rx_transfer_size - 1) / max_rx_transfer_size;

	for (i = 0; i < n_bd; ++i) {
		/* Get Rx packet descriptor from the free pool */
		curr_pd = usb_get_free_pd(cppi);
		if (curr_pd == NULL) {
			/* Shouldn't ever happen! */
			DBG(4, "No Rx PDs\n");
			goto sched;
		}

		pkt_len = (length > max_rx_transfer_size) ?
			  max_rx_transfer_size : length;

		hw_desc = &curr_pd->hw_desc;
		hw_desc->orig_buf_ptr = rx_ch->start_addr + rx_ch->curr_offset;
		hw_desc->orig_buf_len = pkt_len;

		curr_pd->ch_num = rx_ch->ch_num;
		curr_pd->ep_num = rx_ch->end_pt->epnum;

		curr_pd->eop = (length -= pkt_len) ? 0 : 1;
		rx_ch->curr_offset += pkt_len;

		/*
		 * Push the free Rx packet descriptor
		 * to the free descriptor/buffer queue.
		 */
		cppi41_queue_push(&rx_ch->queue_obj, curr_pd->dma_addr,
				  USB_CPPI41_DESC_ALIGN, 0);
	}

sched:
	/*
	 * HCD arranged ReqPkt for the first packet.
	 * We arrange it for all but the last one.
	 */
	if (is_host_active(cppi->musb) && rx_ch->channel.actual_len) {
		void __iomem *epio = rx_ch->end_pt->regs;
		u16 csr = musb_readw(epio, MUSB_RXCSR);

		csr |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* Enable the Rx scheduler if not already enabled */
	if (is_peripheral_active(cppi->musb) && (n_bd > 0))
		cppi41_enable_sched_rx();

	return 1;
}

/**
 * cppi41_channel_program - program channel for data transfer
 * @channel:	the channel
 * @maxpacket:	max packet size
 * @mode:	for Rx, 1 unless the USB protocol driver promised to treat
 *		all short reads as errors and kick in high level fault recovery;
 *		for Tx, 0 unless the protocol driver _requires_ short-packet
 *		termination mode
 * @dma_addr:	DMA address of buffer
 * @length:	length of buffer
 *
 * Context: controller IRQ-locked
 */
static int cppi41_channel_program(struct dma_channel *channel, u16 maxpacket,
				  u8 mode, dma_addr_t dma_addr, u32 length)
{
	struct cppi41_channel *cppi_ch;
	unsigned queued;

	cppi_ch = container_of(channel, struct cppi41_channel, channel);

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* Fault IRQ handler should have handled cleanup */
		WARNING("%cx DMA%d not cleaned up after abort!\n",
			cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("Program active channel? %cx DMA%d\n",
			cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cx DMA%d not allocated!\n",
		    cppi_ch->transmit ? 'T' : 'R', cppi_ch->ch_num);
		return 0;
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Set the transfer parameters, then queue up the first segment */
	cppi_ch->start_addr = dma_addr;
	cppi_ch->curr_offset = 0;
	cppi_ch->pkt_size = maxpacket;
	cppi_ch->length = length;
	cppi_ch->transfer_mode = mode;
	cppi_ch->zlp_queued = 0;
	cppi_ch->channel.actual_len = 0;

	/* Tx or Rx channel? */
	if (cppi_ch->transmit)
		queued = cppi41_next_tx_segment(cppi_ch);
	else
		queued = cppi41_next_rx_segment(cppi_ch);

	return queued > 0;
}

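/*
 * Map the physical address of a descriptor popped from a queue back to its
 * virtual usb_pkt_desc in the coherent PD pool.  Returns NULL if the address
 * falls outside the pool (e.g. a teardown descriptor).
 */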
static struct usb_pkt_desc *usb_get_pd_ptr(struct cppi41 *cppi,
					   unsigned long pd_addr)
{
	if (pd_addr >= cppi->pd_mem_phys && pd_addr < cppi->pd_mem_phys +
	    USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN)
		return pd_addr - cppi->pd_mem_phys + cppi->pd_mem;
	else
		return NULL;
}

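/*
 * Verify that the descriptor popped from the teardown completion queue is
 * really a teardown descriptor for this channel: the info word it carries
 * encodes the transfer direction, the DMA block and the DMA channel number.
 */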
static int usb_check_teardown(struct cppi41_channel *cppi_ch,
			      unsigned long pd_addr)
{
	u32 info;

	if (cppi41_get_teardown_info(pd_addr, &info)) {
		DBG(1, "ERROR: not a teardown descriptor\n");
		return 0;
	}

	if ((info & CPPI41_TEARDOWN_TX_RX_MASK) ==
	    (!cppi_ch->transmit << CPPI41_TEARDOWN_TX_RX_SHIFT) &&
	    (info & CPPI41_TEARDOWN_DMA_NUM_MASK) ==
	    (usb_cppi41_info.dma_block << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &&
	    (info & CPPI41_TEARDOWN_CHAN_NUM_MASK) ==
	    (usb_cppi41_info.ep_dma_ch[cppi_ch->ch_num] <<
	     CPPI41_TEARDOWN_CHAN_NUM_SHIFT))
		return 1;

	DBG(1, "ERROR: unexpected values in teardown descriptor\n");
	return 0;
}

/*
 * We can't handle the channel teardown via the default completion queue in
 * the context of the controller being IRQ-locked, so we use the dedicated
 * teardown completion queue which we can simply poll for a teardown
 * descriptor, without interfering with the Tx completion queue processing.
 */
static void usb_tx_ch_teardown(struct cppi41_channel *tx_ch)
{
	struct cppi41 *cppi = tx_ch->channel.private_data;
	struct musb *musb = cppi->musb;
	void __iomem *reg_base = musb->ctrl_base;
	u32 td_reg, timeout = 0xfffff;
	u8 ep_num = tx_ch->ch_num + 1;
	unsigned long pd_addr;

	/* Initiate teardown for Tx DMA channel */
	cppi41_dma_ch_teardown(&tx_ch->dma_ch_obj);

	/* Wait for a descriptor to be queued and pop it... */
	do {
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_TX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		pd_addr = cppi41_queue_pop(&cppi->queue_obj);
	} while (!pd_addr && timeout--);

	if (pd_addr) {
		dprintk("Descriptor (%08lx) popped from teardown completion "
			"queue\n", pd_addr);

		if (usb_check_teardown(tx_ch, pd_addr))
			dprintk("Teardown Desc (%08lx) rcvd\n", pd_addr);
		else
			ERR("Invalid PD (%08lx) popped from teardown "
			    "completion queue\n", pd_addr);
	} else {
		ERR("Teardown Desc not rcvd\n");
	}
}

/*
 * For Rx DMA channels, the situation is more complex: there's only a single
 * completion queue for all our needs, so we have to temporarily redirect the
 * completed descriptors to our teardown completion queue, with a possibility
 * of a completed packet landing there as well...
 */
static void usb_rx_ch_teardown(struct cppi41_channel *rx_ch)
{
	struct cppi41 *cppi = rx_ch->channel.private_data;
	u32 timeout = 0xfffff;

	cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, 0, cppi->teardownQNum);

	/* Initiate teardown for Rx DMA channel */
	cppi41_dma_ch_teardown(&rx_ch->dma_ch_obj);

	do {
		struct usb_pkt_desc *curr_pd;
		unsigned long pd_addr;

		/* Wait for a descriptor to be queued and pop it... */
		do {
			pd_addr = cppi41_queue_pop(&cppi->queue_obj);
		} while (!pd_addr && timeout--);

		if (!pd_addr) {
			ERR("Teardown descriptor not found\n");
			break;
		}

		dprintk("Descriptor (%08lx) popped from teardown completion "
			"queue\n", pd_addr);

		/*
		 * We might have popped a completed Rx PD, so check if the
		 * physical address is within the PD region first.  If it's
		 * not the case, it must be a teardown descriptor...
		 */
		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			if (usb_check_teardown(rx_ch, pd_addr))
				break;
			continue;
		}

		/* Paranoia: check if PD is from the right channel... */
		if (curr_pd->ch_num != rx_ch->ch_num) {
			ERR("Unexpected channel %d in Rx PD\n",
			    curr_pd->ch_num);
			continue;
		}

		/* Extract the buffer length from the completed PD */
		rx_ch->channel.actual_len += curr_pd->hw_desc.buf_len;

		/*
		 * Return Rx PDs to the software list --
		 * this is protected by critical section.
		 */
		usb_put_free_pd(cppi, curr_pd);
	} while (0);

	/* Now restore the default Rx completion queue... */
	cppi41_dma_ch_default_queue(&rx_ch->dma_ch_obj, usb_cppi41_info.q_mgr,
				    usb_cppi41_info.rx_comp_q[0]);
}

/*
 * cppi41_channel_abort
 *
 * Context: controller IRQ-locked, endpoint selected.
 */
static int cppi41_channel_abort(struct dma_channel *channel)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	struct musb *musb;
	void __iomem *reg_base, *epio;
	unsigned long pd_addr;
	u32 csr, td_reg;
	u8 ch_num, ep_num;

	cppi_ch = container_of(channel, struct cppi41_channel, channel);
	ch_num = cppi_ch->ch_num;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* From Rx or Tx fault IRQ handler */
	case MUSB_DMA_STATUS_BUSY:
		/* The hardware needs shutting down... */
		dprintk("%s: DMA busy, status = %x\n",
			__func__, channel->status);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cx DMA%d not allocated\n",
		    cppi_ch->transmit ? 'T' : 'R', ch_num);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		return 0;
	}

	cppi = cppi_ch->channel.private_data;
	musb = cppi->musb;
	reg_base = musb->ctrl_base;
	epio = cppi_ch->end_pt->regs;
	ep_num = ch_num + 1;

#ifdef DEBUG_CPPI_TD
	printk("Before teardown:");
	print_pd_list(cppi->pd_pool_head);
#endif

	if (cppi_ch->transmit) {
		dprintk("Tx channel teardown, cppi_ch = %p\n", cppi_ch);

		/* Tear down Tx DMA channel */
		usb_tx_ch_teardown(cppi_ch);

		/* Issue CPPI FIFO teardown for Tx channel */
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_TX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		/* Flush FIFO of the endpoint */
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_TXCSR, csr);
		musb_writew(epio, MUSB_TXCSR, csr);
	} else { /* Rx */
		dprintk("Rx channel teardown, cppi_ch = %p\n", cppi_ch);

		/* Flush FIFO of the endpoint */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* Issue CPPI FIFO teardown for Rx channel */
		td_reg = musb_readl(reg_base, USB_TEARDOWN_REG);
		td_reg |= USB_RX_TDOWN_MASK(ep_num);
		musb_writel(reg_base, USB_TEARDOWN_REG, td_reg);

		/* Tear down Rx DMA channel */
		usb_rx_ch_teardown(cppi_ch);

		/*
		 * NOTE: docs don't guarantee any of this works...  we expect
		 * that if the USB core stops telling the CPPI core to pull
		 * more data from it, then it'll be safe to flush current Rx
		 * DMA state iff any pending FIFO transfer is done.
		 */

		/* For host, ensure ReqPkt is never set again */
		cppi41_autoreq_update(cppi_ch, USB_NO_AUTOREQ);

		/* For host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi->musb))
			csr &= ~MUSB_RXCSR_H_REQPKT;
		csr |= MUSB_RXCSR_H_WZC_BITS;

		/* Clear DMA enable */
		csr &= ~MUSB_RXCSR_DMAENAB;
		musb_writew(epio, MUSB_RXCSR, csr);

		/* Flush the FIFO of the endpoint once again */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_H_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);

		udelay(50);
	}

	/*
	 * There might be PDs in the Rx/Tx source queue that were not consumed
	 * by the DMA controller -- they need to be recycled properly.
	 */
	while ((pd_addr = cppi41_queue_pop(&cppi_ch->queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from source queue\n");
			continue;
		}

		/*
		 * Return Rx/Tx PDs to the software list --
		 * this is protected by critical section.
		 */
		dprintk("Returning PD %p to the free PD list\n", curr_pd);
		usb_put_free_pd(cppi, curr_pd);
	}

#ifdef DEBUG_CPPI_TD
	printk("After teardown:");
	print_pd_list(cppi->pd_pool_head);
#endif

	/* Re-enable the DMA channel */
	cppi41_dma_ch_enable(&cppi_ch->dma_ch_obj);

	channel->status = MUSB_DMA_STATUS_FREE;

	return 0;
}

/*
 * dma_controller_create - instantiate an object representing the DMA
 * controller.
 */
struct dma_controller * __init dma_controller_create(struct musb *musb,
						     void __iomem *mregs)
{
	struct cppi41 *cppi;

	cppi = kzalloc(sizeof *cppi, GFP_KERNEL);
	if (!cppi)
		return NULL;

	/* Initialize the CPPI 4.1 DMA controller structure */
	cppi->musb = musb;
	cppi->controller.start = cppi41_controller_start;
	cppi->controller.stop = cppi41_controller_stop;
	cppi->controller.channel_alloc = cppi41_channel_alloc;
	cppi->controller.channel_release = cppi41_channel_release;
	cppi->controller.channel_program = cppi41_channel_program;
	cppi->controller.channel_abort = cppi41_channel_abort;

	return &cppi->controller;
}

/*
 * dma_controller_destroy - destroy a previously instantiated DMA controller
 * @controller: the controller
 */
void dma_controller_destroy(struct dma_controller *controller)
{
	struct cppi41 *cppi;

	cppi = container_of(controller, struct cppi41, controller);

	/* Free the CPPI object */
	kfree(cppi);
}

static void usb_process_tx_queue(struct cppi41 *cppi, unsigned index)
{
	struct cppi41_queue_obj tx_queue_obj;
	unsigned long pd_addr;

	if (cppi41_queue_init(&tx_queue_obj, usb_cppi41_info.q_mgr,
			      usb_cppi41_info.tx_comp_q[index])) {
		DBG(1, "ERROR: cppi41_queue_init failed for "
		    "Tx completion queue");
		return;
	}

	while ((pd_addr = cppi41_queue_pop(&tx_queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;
		struct cppi41_channel *tx_ch;
		u8 ch_num, ep_num;
		u32 length;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from Tx completion queue\n");
			continue;
		}

		/* Extract the data from the received packet descriptor */
		ch_num = curr_pd->ch_num;
		ep_num = curr_pd->ep_num;
		length = curr_pd->hw_desc.buf_len;

		tx_ch = &cppi->tx_cppi_ch[ch_num];
		tx_ch->channel.actual_len += length;

		/*
		 * Return Tx PD to the software list --
		 * this is protected by critical section
		 */
		usb_put_free_pd(cppi, curr_pd);

		if ((tx_ch->curr_offset < tx_ch->length) ||
		    (tx_ch->transfer_mode && !tx_ch->zlp_queued))
			cppi41_next_tx_segment(tx_ch);
		else if (tx_ch->channel.actual_len >= tx_ch->length) {
			tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			/* Tx completion routine callback */
			musb_dma_completion(cppi->musb, ep_num, 1);
		}
	}
}

static void usb_process_rx_queue(struct cppi41 *cppi, unsigned index)
{
	struct cppi41_queue_obj rx_queue_obj;
	unsigned long pd_addr;

	if (cppi41_queue_init(&rx_queue_obj, usb_cppi41_info.q_mgr,
			      usb_cppi41_info.rx_comp_q[index])) {
		DBG(1, "ERROR: cppi41_queue_init failed for Rx queue\n");
		return;
	}

	while ((pd_addr = cppi41_queue_pop(&rx_queue_obj)) != 0) {
		struct usb_pkt_desc *curr_pd;
		struct cppi41_channel *rx_ch;
		u8 ch_num, ep_num;
		u32 length;

		curr_pd = usb_get_pd_ptr(cppi, pd_addr);
		if (curr_pd == NULL) {
			ERR("Invalid PD popped from Rx completion queue\n");
			continue;
		}

		/* Extract the data from the received packet descriptor */
		ch_num = curr_pd->ch_num;
		ep_num = curr_pd->ep_num;
		length = curr_pd->hw_desc.buf_len;

		rx_ch = &cppi->rx_cppi_ch[ch_num];
		rx_ch->channel.actual_len += length;

		if (curr_pd->eop) {
			curr_pd->eop = 0;
			/* Disable the Rx DMA scheduler */
			if (is_peripheral_active(cppi->musb)) {
				cppi41_disable_sched_rx();
				musb_dma_completion(cppi->musb, ep_num, 0);
			}
		}

		/*
		 * Return Rx PD to the software list --
		 * this is protected by critical section
		 */
		usb_put_free_pd(cppi, curr_pd);

		if (unlikely(rx_ch->channel.actual_len >= rx_ch->length ||
			     length < curr_pd->hw_desc.orig_buf_len)) {
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			/* Rx completion routine callback */
			musb_dma_completion(cppi->musb, ep_num, 0);
		} else {
			if (is_peripheral_active(cppi->musb) &&
			    ((rx_ch->length - rx_ch->curr_offset) > 0))
				cppi41_next_rx_segment(rx_ch);
		}
	}
}

/*
 * cppi41_completion - handle interrupts from the Tx/Rx completion queues
 *
 * NOTE: since we have to manually prod the Rx process in the transparent mode,
 * we certainly want to handle the Rx queues first.
 */
void cppi41_completion(struct musb *musb, u32 rx, u32 tx)
{
	struct cppi41 *cppi;
	unsigned index;

	cppi = container_of(musb->dma_controller, struct cppi41, controller);

1306 for (index = 0; rx != 0; rx >>= 1, index++)
1307 if (rx & 1)
1308 usb_process_rx_queue(cppi, index);
1310 /* Process packet descriptors from the Tx completion queues */
1311 for (index = 0; tx != 0; tx >>= 1, index++)
1312 if (tx & 1)
1313 usb_process_tx_queue(cppi, index);