/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 ... 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */
/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD	64
#define NUM_RXCHAN_BD	64
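/* Illustrative arithmetic for the note above: full speed bulk maxpacket is
 * 64 bytes, so one channel's 64 BDs cover 64 * 64 == 4096 bytes (4K) of
 * queued bulk data.
 */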
static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef	CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}
static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
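/* Illustrative usage (a sketch, not code the driver runs): the pair above
 * implements a LIFO freelist, so fetching and immediately returning a
 * descriptor leaves the channel's freelist unchanged:
 *
 *	struct cppi_descriptor *bd = cppi_bd_alloc(c);
 *
 *	if (bd)
 *		cppi_bd_free(c, bd);	// bd is the freelist head again
 */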
/*
 *  Start DMA controller.
 *
 *  Initialize the DMA controller as necessary.
 */
/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}
/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}
static void __init
cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}
static int cppi_channel_abort(struct dma_channel *);
static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}
static int __init
cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}
/*
 *  Stop DMA controller.
 *
 *  De-initialize the DMA controller as necessary.
 */
static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi		*controller;
	void __iomem		*tibase;
	int			i;

	controller = container_of(c, struct cppi, controller);

	tibase = controller->tibase;
	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	DBG(1, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* In the TX case proper teardown is supported; we resort to
	 * disabling TX/RX CPPI only after cleanup of the TX channels,
	 * since TX CPPI cannot be disabled before TX teardown completes.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}
/* While a dma channel is allocated, we only want the core irqs active
 * for fault reports; otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
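/* Worked example of the masking above (illustrative): for RX endpoint 1,
 * these helpers set or clear bit (1 + 8) == bit 9 of the interrupt mask
 * registers; the (epnum + 8) shift reflects RX endpoint interrupt bits
 * starting at bit 8 in the DaVinci USB interrupt registers.
 */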
/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		DBG(1, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}
/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		DBG(1, "releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
			"%08x H%08x S%08x C%08x, "
			"B%08x L%08x %08x .. %08x"
			"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}
/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "TX DMA%d%s: csr %04x, "
			"H%08x S%08x C%08x %08x, "
			"F%08x L%08x .. %08x"
			"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}
/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}
#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}
#endif
static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
#endif
}
/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef	RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
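/* Sketch (illustrative, not used by the driver) of the AUTOREQ field
 * encoding the masking above assumes: each RX channel owns a 2-bit mode
 * field at bit (index * 2), where 0x0 means "autoreq never", 0x1
 * "all-but-last", and 0x3 "all".
 */
static inline u32 __maybe_unused cppi_autoreq_mode(u32 reg, unsigned index,
		u32 mode)
{
	reg &= ~(0x3 << (index * 2));		/* clear this channel's field */
	reg |= (mode & 0x3) << (index * 2);	/* install the new mode */
	return reg;
}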
/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model:
 * the goal will be to append to the existing queue, processing completed
 * "dma buffers" (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a
 * time" "transparent" DMA model is always correct, even on the RX side.
 */
/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave too
 * (except for how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}
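/* Worked example of the TX mode choice above (illustrative): with maxpacket
 * 512 and length 1200, the rndis test passes (512 is a multiple of 64,
 * 1200 > 512, 1200 < 0xffff, 1200 % 512 != 0), so the whole transfer fits
 * one BD and one IRQ.  A 1024 byte transfer fails the last test and instead
 * uses two "transparent" BDs, avoiding the terminating-ZLP confusion.
 */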
/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both
 *   packets are loaded into the buffer (with a 212 byte gap between them),
 *   and the next buffer queued will NOT get its 300 bytes of data.  (It
 *   seems like SOP/EOP are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way
 *   to reliably make both cases work, with software handling both cases
 *   correctly and at the significant penalty of needing an IRQ per packet.
 *   (The lack of I/O overlap can be slightly ameliorated by enabling double
 *   buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */
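/* Worked example of the scenarios above (illustrative): a 1KB buffer with
 * maxpacket 512 in transparent multi-BD mode gets two 512 byte BDs; case (b)
 * fills both and completes cleanly, while a short 300 byte packet (case (a))
 * must terminate the whole segment.  That is why cppi_next_rx_segment()
 * below queues one segment at a time and normally runs with n_bds == 1.
 */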
/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a)	peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b)	and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c)	and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 *
 * Cost of the heuristic failing:  RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
static bool cppi_rx_rndis = true;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
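/* The length tests of the heuristic, restated as a standalone predicate (a
 * sketch for clarity, not used by the driver; cppi_next_rx_segment() below
 * open-codes the same tests on its local 'length' and 'maxpacket'):
 */
static inline bool __maybe_unused cppi_rx_rndis_usable(size_t length,
		unsigned maxpacket)
{
	return length > maxpacket		/* (c) N > 1 packets long */
		&& (length & ~0xffff) == 0	/* (c) less than 64KB */
		&& (length & 0x0fff) != 0	/* (c) not a 4096 multiple */
		&& (length & (maxpacket - 1)) == 0; /* exact packet multiple */
}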
/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			n_bds = 1;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes.  So:  multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%x len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			addr, length, rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

#ifdef CONFIG_USB_MUSB_DEBUG
	if (_dbg_level(5)) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}
#endif

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}
/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
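/* Hypothetical caller sketch (names are illustrative; the real callers live
 * in musb_host.c and musb_gadget.c): programming a 4KB bulk RX transfer on
 * a free channel, in the usual "onepacket" mode (mode 1):
 *
 *	if (chan->status == MUSB_DMA_STATUS_FREE)
 *		cppi_channel_program(chan, 512, 1, buf_dma, 4096);
 *
 * Completion is reported through musb_dma_completion() from
 * cppi_interrupt(), after which the channel status returns to
 * MUSB_DMA_STATUS_FREE.
 */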
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets may
			 * active be in the queue... TI docs didn't say, but
			 * CPPI ignores those BDs even though OWN is still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		if (!rx->head)
			return false;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racey, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}
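/* Summary of the ack handshake above (descriptive, not new behavior):
 * writing a completed BD's dma address to the rx_complete register tells
 * CPPI that the BD may be recycled; reading the register back right away
 * shows whether CPPI has meanwhile completed further BDs (a new safe2ack
 * value) or has caught up with our scan.
 */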
irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		flags;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If the head BD is null, this could be an abort interrupt
		 * that needs to be acknowledged.
		 */
		if (NULL == bd) {
			DBG(1, "null BD\n");
			tx_ram->tx_complete = 0;
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);
			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel	*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;
	struct device		*dev = musb->controller;
	struct platform_device	*pdev = to_platform_device(dev);
	int			irq = platform_get_irq(pdev, 1);

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	if (irq > 0) {
		if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
			dev_err(dev, "request_irq %d failed!\n", irq);
			dma_controller_destroy(&controller->controller);
			return NULL;
		}
		controller->irq = irq;
	}

	return &controller->controller;
}
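/* Hypothetical lifecycle sketch (the actual calls are made by the musb
 * core; names here are illustrative):
 *
 *	struct dma_controller *dc = dma_controller_create(musb, mregs);
 *
 *	dc->start(dc);			// cppi_controller_start()
 *	... transfers via channel_alloc / channel_program ...
 *	dc->stop(dc);			// cppi_controller_stop()
 *	dma_controller_destroy(dc);
 */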
/*
 *  Destroy a previously-instantiated DMA controller.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	if (cppi->irq)
		free_irq(cppi->irq, cppi->musb);

	/* assert: caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}
/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs = NULL;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/*
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 * 2. Wait for abort interrupt and then put the channel in
		 *    compare mode by writing 1 to the tx_complete register.
		 */
		cppi_reset_tx(tx_ram, 1);
		musb_writel(&tx_ram->tx_complete, 0, 1);
		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ... we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}
/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */