// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 *  benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
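
/*
 * hcd_priv stores a pointer back to our struct musb; it is presumably
 * filled in when the hcd is created (see musb_host_alloc()).
 */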
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}

static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: sometimes the tx fifo flush fails; this has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, make sure tx urb(s) are queued
		 * when unplugging the usb device that is connected to the
		 * AM335x usb host port.
		 *
		 * Using a usb-ethernet device and running iperf (client on
		 * AM335x) has a very high chance of triggering it.
		 *
		 * Better to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the tx channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
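
/*
 * Flush ep0's fifo.  ep0 repurposes the TX register offsets for its
 * shared fifo (see musb_rx_reinit() below), which is why CSR0 bits are
 * written through MUSB_TXCSR here; CSR0 is left cleared for the next
 * transfer.
 */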
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
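
/*
 * Track the current qh for each direction.  With a shared fifo the same
 * hardware endpoint serves both directions, so both pointers are updated
 * together to keep the bookkeeping consistent.
 */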
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
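
/*
 * Flush the RX fifo without touching the data toggle (it may not have
 * been saved yet); returns the final RXCSR so the caller sees the
 * post-flush state.
 */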
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);
	}

	/* scrub all previous state, clearing toggle */
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
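
/*
 * Program TXCSR for Mentor/ux500 TX DMA: mode 1 (with AUTOSET where the
 * table below permits) for multi-packet transfers, mode 0 for transfers
 * that fit in a single packet.
 */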
static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset,
		u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table below
		 * bulk_split	hb_mult	Autoset_Enable
		 *	0	1	Yes (Normal)
		 *	0	>1	No  (High BW ISO)
		 *	1	1	Yes (HS bulk)
		 *	1	>1	Yes (FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}

static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}
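
/*
 * Returns true once the transfer has been handed off to the DMA
 * controller; a false return means the channel could not be programmed
 * (or no DMA support is configured) and the caller must fall back to PIO.
 */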
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting
	 * the DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
						"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first
		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
	      (u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	return dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
#endif

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
	defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
   /\	=> first packet is received
   |	- Set in mode 0 (DmaEnab, ~ReqPkt)
   |		-> DMA Isr (transfer complete) -> RxReady()
   |		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
   |		    - if urb not complete, send next IN token (ReqPkt)
   |			   |	else complete urb.
   |			   |
   ---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/* even if there was an error, we did the dma
		 * for iso_frame_desc->length
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* REVISIT: Why ignore return value here? */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
							      urb, len);
			done = false;
		}

	} else {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* send IN token for next packet, without AUTOREQ */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* because of the issue below, mode 1 will
	 * only rarely behave with correct semantics.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	    > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* See comments above on disadvantages of using mode 1 */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT if when actual_length != 0,
	 * transfer_buffer_length needs to be
	 * adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				   channel->desired_mode,
				   buf, length);

	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			 | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
#endif
1764 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1765 * and high-bandwidth IN transfer cases.
1767 void musb_host_rx(struct musb *musb, u8 epnum)
1769 struct urb *urb;
1770 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1771 struct dma_controller *c = musb->dma_controller;
1772 void __iomem *epio = hw_ep->regs;
1773 struct musb_qh *qh = hw_ep->in_qh;
1774 size_t xfer_len;
1775 void __iomem *mbase = musb->mregs;
1776 u16 rx_csr, val;
1777 bool iso_err = false;
1778 bool done = false;
1779 u32 status;
1780 struct dma_channel *dma;
1781 unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1783 musb_ep_select(mbase, epnum);
1785 urb = next_urb(qh);
1786 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1787 status = 0;
1788 xfer_len = 0;
1790 rx_csr = musb_readw(epio, MUSB_RXCSR);
1791 val = rx_csr;
1793 if (unlikely(!urb)) {
1794 /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1795 * usbtest #11 (unlinks) triggers it regularly, sometimes
1796 * with fifo full. (Only with DMA??)
1798 musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
1799 epnum, val, musb_readw(epio, MUSB_RXCOUNT));
1800 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1801 return;
1804 trace_musb_urb_rx(musb, urb);
1806 /* check for errors, concurrent stall & unlink is not really
1807 * handled yet! */
1808 if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1809 musb_dbg(musb, "RX end %d STALL", epnum);
1811 /* stall; record URB status */
1812 status = -EPIPE;
1814 } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1815 musb_dbg(musb, "end %d RX proto error", epnum);
1817 status = -EPROTO;
1818 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1820 rx_csr &= ~MUSB_RXCSR_H_ERROR;
1821 musb_writew(epio, MUSB_RXCSR, rx_csr);
1823 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1825 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1826 musb_dbg(musb, "RX end %d NAK timeout", epnum);
1828 /* NOTE: NAKing is *NOT* an error, so we want to
1829 * continue. Except ... if there's a request for
1830 * another QH, use that instead of starving it.
1832 * Devices like Ethernet and serial adapters keep
1833 * reads posted at all times, which will starve
1834 * other devices without this logic.
1836 if (usb_pipebulk(urb->pipe)
1837 && qh->mux == 1
1838 && !list_is_singular(&musb->in_bulk)) {
1839 musb_bulk_nak_timeout(musb, hw_ep, 1);
1840 return;
1842 musb_ep_select(mbase, epnum);
1843 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1844 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1845 musb_writew(epio, MUSB_RXCSR, rx_csr);
1847 goto finish;
1848 } else {
1849 musb_dbg(musb, "RX end %d ISO data error", epnum);
1850 /* packet error reported later */
1851 iso_err = true;
1853 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1854 musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
1855 epnum);
1856 status = -EPROTO;
1859 /* faults abort the transfer */
1860 if (status) {
1861 /* clean up dma and collect transfer count */
1862 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1863 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1864 musb->dma_controller->channel_abort(dma);
1865 xfer_len = dma->actual_len;
1867 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1868 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1869 done = true;
1870 goto finish;
1873 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1874 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1875 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1876 goto finish;
1879 /* thorough shutdown for now ... given more precise fault handling
1880 * and better queueing support, we might keep a DMA pipeline going
1881 * while processing this irq for earlier completions.
1884 /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1885 if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
1886 (rx_csr & MUSB_RXCSR_H_REQPKT)) {
1887 /* REVISIT this happened for a while on some short reads...
1888 * the cleanup still needs investigation... looks bad...
1889 * and also duplicates dma cleanup code above ... plus,
1890 * shouldn't this be the "half full" double buffer case?
1892 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1893 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1894 musb->dma_controller->channel_abort(dma);
1895 xfer_len = dma->actual_len;
1896 done = true;
1899 musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
1900 xfer_len, dma ? ", dma" : "");
1901 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1903 musb_ep_select(mbase, epnum);
1904 musb_writew(epio, MUSB_RXCSR,
1905 MUSB_RXCSR_H_WZC_BITS | rx_csr);
1908 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1909 xfer_len = dma->actual_len;
1911 val &= ~(MUSB_RXCSR_DMAENAB
1912 | MUSB_RXCSR_H_AUTOREQ
1913 | MUSB_RXCSR_AUTOCLEAR
1914 | MUSB_RXCSR_RXPKTRDY);
1915 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1917 if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1918 musb_dma_cppi41(musb)) {
1919 done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1920 musb_dbg(hw_ep->musb,
1921 "ep %d dma %s, rxcsr %04x, rxcount %d",
1922 epnum, done ? "off" : "reset",
1923 musb_readw(epio, MUSB_RXCSR),
1924 musb_readw(epio, MUSB_RXCOUNT));
1925 } else {
1926 done = true;
1929 } else if (urb->status == -EINPROGRESS) {
1930 /* if no errors, be sure a packet is ready for unloading */
1931 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1932 status = -EPROTO;
1933 ERR("Rx interrupt with no errors or packet!\n");
1935 /* FIXME this is another "SHOULD NEVER HAPPEN" */
1937 /* SCRUB (RX) */
1938 /* do the proper sequence to abort the transfer */
1939 musb_ep_select(mbase, epnum);
1940 val &= ~MUSB_RXCSR_H_REQPKT;
1941 musb_writew(epio, MUSB_RXCSR, val);
1942 goto finish;
1945 /* we are expecting IN packets */
1946 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1947 musb_dma_cppi41(musb)) && dma) {
1948 musb_dbg(hw_ep->musb,
1949 "RX%d count %d, buffer 0x%llx len %d/%d",
1950 epnum, musb_readw(epio, MUSB_RXCOUNT),
1951 (unsigned long long) urb->transfer_dma
1952 + urb->actual_length,
1953 qh->offset,
1954 urb->transfer_buffer_length);
1956 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1957 xfer_len, iso_err))
1958 goto finish;
1959 else
1960 dev_err(musb->controller, "error: rx_dma failed\n");
1963 if (!dma) {
1964 unsigned int received_len;
1966 /* Unmap the buffer so that CPU can use it */
1967 usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1969 /*
1970 * We need to map sg if the transfer_buffer is
1971 * NULL.
1972 */
1973 if (!urb->transfer_buffer) {
1974 qh->use_sg = true;
1975 sg_miter_start(&qh->sg_miter, urb->sg, 1,
1976 sg_flags);
1977 }
1979 if (qh->use_sg) {
1980 if (!sg_miter_next(&qh->sg_miter)) {
1981 dev_err(musb->controller, "error: sg list empty\n");
1982 sg_miter_stop(&qh->sg_miter);
1983 status = -EINVAL;
1984 done = true;
1985 goto finish;
1986 }
1987 urb->transfer_buffer = qh->sg_miter.addr;
1988 received_len = urb->actual_length;
1989 qh->offset = 0x0;
1990 done = musb_host_packet_rx(musb, urb, epnum,
1991 iso_err);
1992 /* Calculate the number of bytes received */
1993 received_len = urb->actual_length -
1994 received_len;
1995 qh->sg_miter.consumed = received_len;
1996 sg_miter_stop(&qh->sg_miter);
1997 } else {
1998 done = musb_host_packet_rx(musb, urb,
1999 epnum, iso_err);
2000 }
2001 musb_dbg(musb, "read %spacket", done ? "last " : "");
2002 }
2003 }
2005 finish:
2006 urb->actual_length += xfer_len;
2007 qh->offset += xfer_len;
2008 if (done) {
2009 if (qh->use_sg)
2010 qh->use_sg = false;
2012 if (urb->status == -EINPROGRESS)
2013 urb->status = status;
2014 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
2015 }
2016 }
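/* NOTE: an illustrative walk-through of the finish: bookkeeping above,
 * assuming a hypothetical 1536-byte bulk IN urb with 512-byte packets:
 * each IRQ adds that packet's xfer_len to urb->actual_length and to
 * qh->offset (512, then 1024, then 1536); only the last packet (full
 * length reached, or a short packet) sets "done", which records the
 * final urb->status and lets musb_advance_schedule() start the next
 * urb or qh on this hardware endpoint.
 */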
2018 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
2019 * the software schedule associates multiple such nodes with a given
2020 * host side hardware endpoint + direction; scheduling may activate
2021 * that hardware endpoint.
2022 */
2023 static int musb_schedule(
2024 struct musb *musb,
2025 struct musb_qh *qh,
2026 int is_in)
2027 {
2028 int idle = 0;
2029 int best_diff;
2030 int best_end, epnum;
2031 struct musb_hw_ep *hw_ep = NULL;
2032 struct list_head *head = NULL;
2033 u8 toggle;
2034 u8 txtype;
2035 struct urb *urb = next_urb(qh);
2037 /* use fixed hardware for control and bulk */
2038 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2039 head = &musb->control;
2040 hw_ep = musb->control_ep;
2041 goto success;
2042 }
2044 /* else, periodic transfers get muxed to other endpoints */
2046 /*
2047 * We know this qh hasn't been scheduled, so all we need to do
2048 * is choose which hardware endpoint to put it on ...
2049 *
2050 * REVISIT what we really want here is a regular schedule tree
2051 * like e.g. OHCI uses.
2052 */
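/* NOTE: the loop below is a best-fit search; in sketch form (with a
 * hypothetical fifo_size() standing in for max_packet_sz_rx/tx):
 *
 *	diff = fifo_size(hw_ep) - qh->maxpacket * qh->hb_mult;
 *	if (diff >= 0 && diff < best_diff)
 *		best_end = epnum;	// smallest FIFO that still fits
 *
 * so a 64-byte interrupt qh prefers a 64-byte FIFO over a 512-byte
 * one, leaving the large FIFOs free for large-maxpacket transfers.
 */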
2053 best_diff = 4096;
2054 best_end = -1;
2056 for (epnum = 1, hw_ep = musb->endpoints + 1;
2057 epnum < musb->nr_endpoints;
2058 epnum++, hw_ep++) {
2059 int diff;
2061 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2062 continue;
2064 if (hw_ep == musb->bulk_ep)
2065 continue;
2067 if (is_in)
2068 diff = hw_ep->max_packet_sz_rx;
2069 else
2070 diff = hw_ep->max_packet_sz_tx;
2071 diff -= (qh->maxpacket * qh->hb_mult);
2073 if (diff >= 0 && best_diff > diff) {
2075 /*
2076 * Mentor controller has a bug in that if we schedule
2077 * a BULK Tx transfer on an endpoint that had earlier
2078 * handled ISOC then the BULK transfer has to start on
2079 * a zero toggle. If the BULK transfer starts on a 1
2080 * toggle then this transfer will fail as the mentor
2081 * controller starts the Bulk transfer on a 0 toggle
2082 * irrespective of the programming of the toggle bits
2083 * in the TXCSR register. Check for this condition
2084 * while allocating the EP for a Tx Bulk transfer. If
2085 * so skip this EP.
2086 */
2087 hw_ep = musb->endpoints + epnum;
2088 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2089 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2090 >> 4) & 0x3;
2091 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2092 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2093 continue;
2095 best_diff = diff;
2096 best_end = epnum;
2097 }
2098 }
2099 /* use bulk reserved ep1 if no other ep is free */
2100 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2101 hw_ep = musb->bulk_ep;
2102 if (is_in)
2103 head = &musb->in_bulk;
2104 else
2105 head = &musb->out_bulk;
2107 /* Enable the bulk RX/TX NAK timeout scheme when bulk requests are
2108 * multiplexed. This scheme does not work in the high speed to full
2109 * speed scenario, as NAK interrupts do not arrive from a full speed
2110 * device connected to a high speed device.
2111 * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS and
2112 * 4 (8 frames or 8 ms) for a FS device.
2113 */
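/* NOTE on the units above, for illustration: the interval register
 * holds a power-of-two exponent for the NAK timeout, so 8 means
 * 2^(8-1) = 128 units and 4 means 2^(4-1) = 8 units; at high speed a
 * unit is a 125 us microframe (128 * 125 us = 16 ms), at full speed a
 * 1 ms frame (8 * 1 ms = 8 ms), matching the figures quoted above.
 */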
2114 if (qh->dev)
2115 qh->intv_reg =
2116 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2117 goto success;
2118 } else if (best_end < 0) {
2119 dev_err(musb->controller,
2120 "%s hwep alloc failed for %dx%d\n",
2121 musb_ep_xfertype_string(qh->type),
2122 qh->hb_mult, qh->maxpacket);
2123 return -ENOSPC;
2124 }
2126 idle = 1;
2127 qh->mux = 0;
2128 hw_ep = musb->endpoints + best_end;
2129 musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
2130 success:
2131 if (head) {
2132 idle = list_empty(head);
2133 list_add_tail(&qh->ring, head);
2134 qh->mux = 1;
2135 }
2136 qh->hw_ep = hw_ep;
2137 qh->hep->hcpriv = qh;
2138 if (idle)
2139 musb_start_urb(musb, is_in, qh);
2140 return 0;
2141 }
2143 static int musb_urb_enqueue(
2144 struct usb_hcd *hcd,
2145 struct urb *urb,
2146 gfp_t mem_flags)
2147 {
2148 unsigned long flags;
2149 struct musb *musb = hcd_to_musb(hcd);
2150 struct usb_host_endpoint *hep = urb->ep;
2151 struct musb_qh *qh;
2152 struct usb_endpoint_descriptor *epd = &hep->desc;
2153 int ret;
2154 unsigned type_reg;
2155 unsigned interval;
2157 /* host role must be active */
2158 if (!is_host_active(musb) || !musb->is_active)
2159 return -ENODEV;
2161 trace_musb_urb_enq(musb, urb);
2163 spin_lock_irqsave(&musb->lock, flags);
2164 ret = usb_hcd_link_urb_to_ep(hcd, urb);
2165 qh = ret ? NULL : hep->hcpriv;
2166 if (qh)
2167 urb->hcpriv = qh;
2168 spin_unlock_irqrestore(&musb->lock, flags);
2170 /* DMA mapping was already done, if needed, and this urb is on
2171 * hep->urb_list now ... so we're done, unless hep wasn't yet
2172 * scheduled onto a live qh.
2173 *
2174 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2175 * disabled, testing for empty qh->ring and avoiding qh setup costs
2176 * except for the first urb queued after a config change.
2177 */
2178 if (qh || ret)
2179 return ret;
2181 /* Allocate and initialize qh, minimizing the work done each time
2182 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
2183 *
2184 * REVISIT consider a dedicated qh kmem_cache, so it's harder
2185 * for bugs in other kernel code to break this driver...
2186 */
2187 qh = kzalloc(sizeof(*qh), mem_flags);
2188 if (!qh) {
2189 spin_lock_irqsave(&musb->lock, flags);
2190 usb_hcd_unlink_urb_from_ep(hcd, urb);
2191 spin_unlock_irqrestore(&musb->lock, flags);
2192 return -ENOMEM;
2193 }
2195 qh->hep = hep;
2196 qh->dev = urb->dev;
2197 INIT_LIST_HEAD(&qh->ring);
2198 qh->is_ready = 1;
2200 qh->maxpacket = usb_endpoint_maxp(epd);
2201 qh->type = usb_endpoint_type(epd);
2203 /* Bits 11 & 12 of wMaxPacketSize encode the high bandwidth multiplier.
2204 * Some musb cores don't support high bandwidth ISO transfers; and
2205 * we don't (yet!) support high bandwidth interrupt transfers.
2206 */
2207 qh->hb_mult = usb_endpoint_maxp_mult(epd);
2208 if (qh->hb_mult > 1) {
2209 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2211 if (ok)
2212 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2213 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2214 if (!ok) {
2215 dev_err(musb->controller,
2216 "high bandwidth %s (%dx%d) not supported\n",
2217 musb_ep_xfertype_string(qh->type),
2218 qh->hb_mult, qh->maxpacket & 0x7ff);
2219 ret = -EMSGSIZE;
2220 goto done;
2221 }
2222 qh->maxpacket &= 0x7ff;
2223 }
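/* NOTE: a worked example of the encoding handled above, with assumed
 * descriptor values: a high bandwidth ISO endpoint advertising
 * wMaxPacketSize = 0x1400 yields usb_endpoint_maxp_mult() = 3 (bits
 * 12:11 = 2, i.e. two additional transactions) and a base maxpacket
 * of 0x400 = 1024 bytes, so up to 3 * 1024 = 3072 bytes per
 * microframe; qh->maxpacket &= 0x7ff keeps only the base packet size.
 */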
2225 qh->epnum = usb_endpoint_num(epd);
2227 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2228 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2230 /* precompute rxtype/txtype/type0 register */
2231 type_reg = (qh->type << 4) | qh->epnum;
2232 switch (urb->dev->speed) {
2233 case USB_SPEED_LOW:
2234 type_reg |= 0xc0;
2235 break;
2236 case USB_SPEED_FULL:
2237 type_reg |= 0x80;
2238 break;
2239 default:
2240 type_reg |= 0x40;
2241 }
2242 qh->type_reg = type_reg;
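/* NOTE: an assumed example of the encoding above: a bulk endpoint 2
 * on a full speed device gives type_reg = (USB_ENDPOINT_XFER_BULK <<
 * 4) | 2 = 0x22, OR'd with the 0x80 speed bits for 0xa2; the speed
 * field occupies the top two bits (0xc0 low, 0x80 full, 0x40 high).
 */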
2244 /* Precompute RXINTERVAL/TXINTERVAL register */
2245 switch (qh->type) {
2246 case USB_ENDPOINT_XFER_INT:
2247 /*
2248 * Full/low speeds use the linear encoding,
2249 * high speed uses the logarithmic encoding.
2250 */
2251 if (urb->dev->speed <= USB_SPEED_FULL) {
2252 interval = max_t(u8, epd->bInterval, 1);
2253 break;
2254 }
2255 /* FALLTHROUGH */
2256 case USB_ENDPOINT_XFER_ISOC:
2257 /* ISO always uses logarithmic encoding */
2258 interval = min_t(u8, epd->bInterval, 16);
2259 break;
2260 default:
2261 /* REVISIT we actually want to use NAK limits, hinting to the
2262 * transfer scheduling logic to try some other qh, e.g. try
2263 * for 2 msec first:
2264 *
2265 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2266 *
2267 * The downside of disabling this is that transfer scheduling
2268 * gets VERY unfair for nonperiodic transfers; a misbehaving
2269 * peripheral could make that hurt. That's perfectly normal
2270 * for reads from network or serial adapters ... so we have
2271 * partial NAKlimit support for bulk RX.
2272 *
2273 * The upside of disabling it is simpler transfer scheduling.
2274 */
2275 interval = 0;
2276 }
2277 qh->intv_reg = interval;
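/* NOTE: assumed examples of the two encodings above: a full speed
 * interrupt endpoint with bInterval = 10 is programmed linearly (poll
 * every 10 frames, i.e. 10 ms), while an ISO endpoint with
 * bInterval = 4 uses the logarithmic form, 2^(4-1) = 8 (micro)frames;
 * bulk endpoints get intv_reg = 0 here and rely on the NAK timeout
 * set up in musb_schedule() when they share the reserved bulk
 * endpoint.
 */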
2279 /* precompute addressing for external hub/tt ports */
2280 if (musb->is_multipoint) {
2281 struct usb_device *parent = urb->dev->parent;
2283 if (parent != hcd->self.root_hub) {
2284 qh->h_addr_reg = (u8) parent->devnum;
2286 /* set up tt info if needed */
2287 if (urb->dev->tt) {
2288 qh->h_port_reg = (u8) urb->dev->ttport;
2289 if (urb->dev->tt->hub)
2290 qh->h_addr_reg =
2291 (u8) urb->dev->tt->hub->devnum;
2292 if (urb->dev->tt->multi)
2293 qh->h_addr_reg |= 0x80;
2294 }
2295 }
2296 }
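/* NOTE: an assumed multipoint example for the block above: a full
 * speed device behind a high speed hub at address 3, port 2, gets
 * qh->h_addr_reg = 3 and qh->h_port_reg = 2 so the controller routes
 * the transaction through that hub's transaction translator; a
 * multi-TT hub additionally sets bit 7 of h_addr_reg.
 */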
2298 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2299 * until we get real dma queues (with an entry for each urb/buffer),
2300 * we only have work to do in the former case.
2301 */
2302 spin_lock_irqsave(&musb->lock, flags);
2303 if (hep->hcpriv || !next_urb(qh)) {
2304 /* some concurrent activity submitted another urb to hep...
2305 * odd, rare, error prone, but legal.
2306 */
2307 kfree(qh);
2308 qh = NULL;
2309 ret = 0;
2310 } else
2311 ret = musb_schedule(musb, qh,
2312 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2314 if (ret == 0) {
2315 urb->hcpriv = qh;
2316 /* FIXME set urb->start_frame for iso/intr, it's tested in
2317 * musb_start_urb(), but otherwise only konicawc cares ...
2318 */
2319 }
2320 spin_unlock_irqrestore(&musb->lock, flags);
2322 done:
2323 if (ret != 0) {
2324 spin_lock_irqsave(&musb->lock, flags);
2325 usb_hcd_unlink_urb_from_ep(hcd, urb);
2326 spin_unlock_irqrestore(&musb->lock, flags);
2327 kfree(qh);
2328 }
2329 return ret;
2330 }
2333 /*
2334 * abort a transfer that's at the head of a hardware queue.
2335 * called with controller locked, irqs blocked
2336 * the hardware queue advances to the next transfer, unless prevented
2337 */
2338 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2339 {
2340 struct musb_hw_ep *ep = qh->hw_ep;
2341 struct musb *musb = ep->musb;
2342 void __iomem *epio = ep->regs;
2343 unsigned hw_end = ep->epnum;
2344 void __iomem *regs = ep->musb->mregs;
2345 int is_in = usb_pipein(urb->pipe);
2346 int status = 0;
2347 u16 csr;
2348 struct dma_channel *dma = NULL;
2350 musb_ep_select(regs, hw_end);
2352 if (is_dma_capable()) {
2353 dma = is_in ? ep->rx_channel : ep->tx_channel;
2354 if (dma) {
2355 status = ep->musb->dma_controller->channel_abort(dma);
2356 musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
2357 is_in ? 'R' : 'T', ep->epnum,
2358 urb, status);
2359 urb->actual_length += dma->actual_len;
2360 }
2361 }
2363 /* turn off DMA requests, discard state, stop polling ... */
2364 if (ep->epnum && is_in) {
2365 /* giveback saves bulk toggle */
2366 csr = musb_h_flush_rxfifo(ep, 0);
2368 /* clear the endpoint's irq status here to avoid bogus irqs */
2369 if (is_dma_capable() && dma)
2370 musb_platform_clear_ep_rxintr(musb, ep->epnum);
2371 } else if (ep->epnum) {
2372 musb_h_tx_flush_fifo(ep);
2373 csr = musb_readw(epio, MUSB_TXCSR);
2374 csr &= ~(MUSB_TXCSR_AUTOSET
2375 | MUSB_TXCSR_DMAENAB
2376 | MUSB_TXCSR_H_RXSTALL
2377 | MUSB_TXCSR_H_NAKTIMEOUT
2378 | MUSB_TXCSR_H_ERROR
2379 | MUSB_TXCSR_TXPKTRDY);
2380 musb_writew(epio, MUSB_TXCSR, csr);
2381 /* REVISIT may need to clear FLUSHFIFO ... */
2382 musb_writew(epio, MUSB_TXCSR, csr);
2383 /* flush cpu writebuffer */
2384 csr = musb_readw(epio, MUSB_TXCSR);
2385 } else {
2386 musb_h_ep0_flush_fifo(ep);
2387 }
2388 if (status == 0)
2389 musb_advance_schedule(ep->musb, urb, ep, is_in);
2390 return status;
2391 }
2393 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2394 {
2395 struct musb *musb = hcd_to_musb(hcd);
2396 struct musb_qh *qh;
2397 unsigned long flags;
2398 int is_in = usb_pipein(urb->pipe);
2399 int ret;
2401 trace_musb_urb_deq(musb, urb);
2403 spin_lock_irqsave(&musb->lock, flags);
2404 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2405 if (ret)
2406 goto done;
2408 qh = urb->hcpriv;
2409 if (!qh)
2410 goto done;
2412 /*
2413 * Any URB not actively programmed into endpoint hardware can be
2414 * immediately given back; that's any URB not at the head of an
2415 * endpoint queue, unless someday we get real DMA queues. And even
2416 * if it's at the head, it might not be known to the hardware...
2417 *
2418 * Otherwise abort current transfer, pending DMA, etc.; urb->status
2419 * has already been updated. This is a synchronous abort; it'd be
2420 * OK to hold off until after some IRQ, though.
2421 *
2422 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2423 */
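/* NOTE: in the test below, "not at the head" is the list_head idiom
 * urb->urb_list.prev != &qh->hep->urb_list: only the first urb on
 * hep->urb_list has the list head itself as its ->prev, so anything
 * else (or a qh the hardware isn't currently running) can be given
 * back without touching the endpoint registers.
 */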
2424 if (!qh->is_ready
2425 || urb->urb_list.prev != &qh->hep->urb_list
2426 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2427 int ready = qh->is_ready;
2429 qh->is_ready = 0;
2430 musb_giveback(musb, urb, 0);
2431 qh->is_ready = ready;
2433 /* If nothing else (usually musb_giveback) is using it
2434 * and its URB list has emptied, recycle this qh.
2435 */
2436 if (ready && list_empty(&qh->hep->urb_list)) {
2437 qh->hep->hcpriv = NULL;
2438 list_del(&qh->ring);
2439 kfree(qh);
2440 }
2441 } else
2442 ret = musb_cleanup_urb(urb, qh);
2443 done:
2444 spin_unlock_irqrestore(&musb->lock, flags);
2445 return ret;
2446 }
2448 /* disable an endpoint */
2449 static void
2450 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2451 {
2452 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2453 unsigned long flags;
2454 struct musb *musb = hcd_to_musb(hcd);
2455 struct musb_qh *qh;
2456 struct urb *urb;
2458 spin_lock_irqsave(&musb->lock, flags);
2460 qh = hep->hcpriv;
2461 if (qh == NULL)
2462 goto exit;
2464 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2466 /* Kick the first URB off the hardware, if needed */
2467 qh->is_ready = 0;
2468 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2469 urb = next_urb(qh);
2471 /* make software (then hardware) stop ASAP */
2472 if (!urb->unlinked)
2473 urb->status = -ESHUTDOWN;
2475 /* cleanup */
2476 musb_cleanup_urb(urb, qh);
2478 /* Then nuke all the others ... and advance the
2479 * queue on hw_ep (e.g. bulk ring) when we're done.
2480 */
2481 while (!list_empty(&hep->urb_list)) {
2482 urb = next_urb(qh);
2483 urb->status = -ESHUTDOWN;
2484 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2485 }
2486 } else {
2487 /* Just empty the queue; the hardware is busy with
2488 * other transfers, and since !qh->is_ready nothing
2489 * will activate any of these as it advances.
2490 */
2491 while (!list_empty(&hep->urb_list))
2492 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2494 hep->hcpriv = NULL;
2495 list_del(&qh->ring);
2496 kfree(qh);
2497 }
2498 exit:
2499 spin_unlock_irqrestore(&musb->lock, flags);
2500 }
2502 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2503 {
2504 struct musb *musb = hcd_to_musb(hcd);
2506 return musb_readw(musb->mregs, MUSB_FRAME);
2507 }
2509 static int musb_h_start(struct usb_hcd *hcd)
2510 {
2511 struct musb *musb = hcd_to_musb(hcd);
2513 /* NOTE: musb_start() is called when the hub driver turns
2514 * on port power, or when (OTG) peripheral starts.
2515 */
2516 hcd->state = HC_STATE_RUNNING;
2517 musb->port1_status = 0;
2518 return 0;
2519 }
2521 static void musb_h_stop(struct usb_hcd *hcd)
2522 {
2523 musb_stop(hcd_to_musb(hcd));
2524 hcd->state = HC_STATE_HALT;
2525 }
2527 static int musb_bus_suspend(struct usb_hcd *hcd)
2528 {
2529 struct musb *musb = hcd_to_musb(hcd);
2530 u8 devctl;
2532 musb_port_suspend(musb, true);
2534 if (!is_host_active(musb))
2535 return 0;
2537 switch (musb->xceiv->otg->state) {
2538 case OTG_STATE_A_SUSPEND:
2539 return 0;
2540 case OTG_STATE_A_WAIT_VRISE:
2541 /* ID could be grounded even if there's no device
2542 * on the other end of the cable. NOTE that the
2543 * A_WAIT_VRISE timers are messy with MUSB...
2544 */
2545 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2546 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2547 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2548 break;
2549 default:
2550 break;
2551 }
2553 if (musb->is_active) {
2554 WARNING("trying to suspend as %s while active\n",
2555 usb_otg_state_string(musb->xceiv->otg->state));
2556 return -EBUSY;
2557 } else
2558 return 0;
2559 }
2561 static int musb_bus_resume(struct usb_hcd *hcd)
2562 {
2563 struct musb *musb = hcd_to_musb(hcd);
2565 if (musb->config &&
2566 musb->config->host_port_deassert_reset_at_resume)
2567 musb_port_reset(musb, false);
2569 return 0;
2570 }
2572 #ifndef CONFIG_MUSB_PIO_ONLY
2574 #define MUSB_USB_DMA_ALIGN 4
2576 struct musb_temp_buffer {
2577 void *kmalloc_ptr;
2578 void *old_xfer_buffer;
2579 u8 data[0];
2580 };
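/* NOTE: layout sketch for the bounce buffer above, assuming 64-bit
 * pointers: kmalloc_ptr is aligned up to MUSB_USB_DMA_ALIGN (4), the
 * two housekeeping pointers then occupy 16 bytes (a multiple of 4),
 * so data[] itself lands on a 4 byte boundary as the DMA engine
 * requires.
 */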
2582 static void musb_free_temp_buffer(struct urb *urb)
2583 {
2584 enum dma_data_direction dir;
2585 struct musb_temp_buffer *temp;
2586 size_t length;
2588 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2589 return;
2591 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2593 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2594 data);
2596 if (dir == DMA_FROM_DEVICE) {
2597 if (usb_pipeisoc(urb->pipe))
2598 length = urb->transfer_buffer_length;
2599 else
2600 length = urb->actual_length;
2602 memcpy(temp->old_xfer_buffer, temp->data, length);
2603 }
2604 urb->transfer_buffer = temp->old_xfer_buffer;
2605 kfree(temp->kmalloc_ptr);
2607 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2608 }
2610 static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2611 {
2612 enum dma_data_direction dir;
2613 struct musb_temp_buffer *temp;
2614 void *kmalloc_ptr;
2615 size_t kmalloc_size;
2617 if (urb->num_sgs || urb->sg ||
2618 urb->transfer_buffer_length == 0 ||
2619 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2620 return 0;
2622 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2624 /* Allocate a buffer with enough padding for alignment */
2625 kmalloc_size = urb->transfer_buffer_length +
2626 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2628 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2629 if (!kmalloc_ptr)
2630 return -ENOMEM;
2632 /* Position our struct temp_buffer such that data is aligned */
2633 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2636 temp->kmalloc_ptr = kmalloc_ptr;
2637 temp->old_xfer_buffer = urb->transfer_buffer;
2638 if (dir == DMA_TO_DEVICE)
2639 memcpy(temp->data, urb->transfer_buffer,
2640 urb->transfer_buffer_length);
2641 urb->transfer_buffer = temp->data;
2643 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2645 return 0;
2646 }
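/* NOTE: an assumed worst case for the sizing above: a transfer_buffer
 * ending in 0x...02 is misaligned by 2 and so takes the bounce path;
 * the helper allocates transfer_buffer_length +
 * sizeof(struct musb_temp_buffer) + 3 bytes, enough for PTR_ALIGN()
 * to round kmalloc_ptr up by as much as MUSB_USB_DMA_ALIGN - 1 and
 * still fit the copy.
 */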
2648 static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2649 gfp_t mem_flags)
2650 {
2651 struct musb *musb = hcd_to_musb(hcd);
2652 int ret;
2654 /*
2655 * The DMA engine in RTL 1.8 and above cannot handle
2656 * DMA addresses that are not aligned to a 4 byte boundary.
2657 * For such engines we implement the (un)map_urb_for_dma hooks.
2658 * Do not use these hooks for RTL < 1.8.
2659 */
2660 if (musb->hwvers < MUSB_HWVERS_1800)
2661 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2663 ret = musb_alloc_temp_buffer(urb, mem_flags);
2664 if (ret)
2665 return ret;
2667 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2668 if (ret)
2669 musb_free_temp_buffer(urb);
2671 return ret;
2672 }
2674 static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2675 {
2676 struct musb *musb = hcd_to_musb(hcd);
2678 usb_hcd_unmap_urb_for_dma(hcd, urb);
2680 /* Do not use this hook for RTL < 1.8 (see description above) */
2681 if (musb->hwvers < MUSB_HWVERS_1800)
2682 return;
2684 musb_free_temp_buffer(urb);
2685 }
2686 #endif /* !CONFIG_MUSB_PIO_ONLY */
2688 static const struct hc_driver musb_hc_driver = {
2689 .description = "musb-hcd",
2690 .product_desc = "MUSB HDRC host driver",
2691 .hcd_priv_size = sizeof(struct musb *),
2692 .flags = HCD_USB2 | HCD_MEMORY,
2694 /* not using irq handler or reset hooks from usbcore, since
2695 * those must be shared with peripheral code for OTG configs
2696 */
2698 .start = musb_h_start,
2699 .stop = musb_h_stop,
2701 .get_frame_number = musb_h_get_frame_number,
2703 .urb_enqueue = musb_urb_enqueue,
2704 .urb_dequeue = musb_urb_dequeue,
2705 .endpoint_disable = musb_h_disable,
2707 #ifndef CONFIG_MUSB_PIO_ONLY
2708 .map_urb_for_dma = musb_map_urb_for_dma,
2709 .unmap_urb_for_dma = musb_unmap_urb_for_dma,
2710 #endif
2712 .hub_status_data = musb_hub_status_data,
2713 .hub_control = musb_hub_control,
2714 .bus_suspend = musb_bus_suspend,
2715 .bus_resume = musb_bus_resume,
2716 /* .start_port_reset = NULL, */
2717 /* .hub_irq_enable = NULL, */
2718 };
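/* NOTE: hcd_priv_size above reserves room for one pointer in the hcd
 * private area; musb_host_alloc() stores the struct musb pointer
 * there (*musb->hcd->hcd_priv = (unsigned long) musb) and
 * hcd_to_musb() casts it back out, which is how every hc_driver hook
 * recovers the controller from its usb_hcd argument.
 */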
2720 int musb_host_alloc(struct musb *musb)
2721 {
2722 struct device *dev = musb->controller;
2724 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
2725 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2726 if (!musb->hcd)
2727 return -EINVAL;
2729 *musb->hcd->hcd_priv = (unsigned long) musb;
2730 musb->hcd->self.uses_pio_for_control = 1;
2731 musb->hcd->uses_new_polling = 1;
2732 musb->hcd->has_tt = 1;
2734 return 0;
2735 }
2737 void musb_host_cleanup(struct musb *musb)
2738 {
2739 if (musb->port_mode == MUSB_PORT_MODE_GADGET)
2740 return;
2741 usb_remove_hcd(musb->hcd);
2742 }
2744 void musb_host_free(struct musb *musb)
2745 {
2746 usb_put_hcd(musb->hcd);
2747 }
2749 int musb_host_setup(struct musb *musb, int power_budget)
2750 {
2751 int ret;
2752 struct usb_hcd *hcd = musb->hcd;
2754 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
2755 MUSB_HST_MODE(musb);
2756 musb->xceiv->otg->default_a = 1;
2757 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2758 }
2759 otg_set_host(musb->xceiv->otg, &hcd->self);
2760 hcd->self.otg_port = 1;
2761 musb->xceiv->otg->host = &hcd->self;
2762 hcd->power_budget = 2 * (power_budget ? : 250);
2764 ret = usb_add_hcd(hcd, 0, 0);
2765 if (ret < 0)
2766 return ret;
2768 device_wakeup_enable(hcd->self.controller);
2769 return 0;
2770 }
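/* NOTE on the power budget above: the power_budget argument comes
 * from platform data in 2 mA units (the bMaxPower convention), so
 * 2 * (power_budget ? : 250) hands usbcore a budget in mA, defaulting
 * to 500 mA when the glue layer doesn't specify one.
 */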
2772 void musb_host_resume_root_hub(struct musb *musb)
2773 {
2774 usb_hcd_resume_root_hub(musb->hcd);
2775 }
2777 void musb_host_poke_root_hub(struct musb *musb)
2778 {
2779 MUSB_HST_MODE(musb);
2780 if (musb->hcd->status_urb)
2781 usb_hcd_poll_rh_status(musb->hcd);
2782 else
2783 usb_hcd_resume_root_hub(musb->hcd);
2784 }