drivers/usb/musb/musb_gadget.c
1 /*
2 * MUSB OTG driver peripheral support
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/timer.h>
39 #include <linux/module.h>
40 #include <linux/smp.h>
41 #include <linux/spinlock.h>
42 #include <linux/delay.h>
43 #include <linux/moduleparam.h>
44 #include <linux/stat.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/slab.h>
48 #include "musb_core.h"
51 /* MUSB PERIPHERAL status 3-mar-2006:
53 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
54 * Minor glitches:
56 *   + remote wakeup to Linux hosts works, but saw a USBCV failure
57 *     in one test run (operator error?)
58 * + endpoint halt tests -- in both usbtest and usbcv -- seem
59 * to break when dma is enabled ... is something wrongly
60 * clearing SENDSTALL?
62 * - Mass storage behaved ok when last tested. Network traffic patterns
63 * (with lots of short transfers etc) need retesting; they turn up the
64 * worst cases of the DMA, since short packets are typical but are not
65 * required.
67 * - TX/IN
68 *   + both pio and dma behave well with network and g_zero tests
69 * + no cppi throughput issues other than no-hw-queueing
70 * + failed with FLAT_REG (DaVinci)
71 * + seems to behave with double buffering, PIO -and- CPPI
72 * + with gadgetfs + AIO, requests got lost?
74 * - RX/OUT
75 *   + both pio and dma behave well with network and g_zero tests
76 * + dma is slow in typical case (short_not_ok is clear)
77 * + double buffering ok with PIO
78 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
79 * + request lossage observed with gadgetfs
81 * - ISO not tested ... might work, but only weakly isochronous
83 * - Gadget driver disabling of softconnect during bind() is ignored; so
84 * drivers can't hold off host requests until userspace is ready.
85 * (Workaround: they can turn it off later.)
87 * - PORTABILITY (assumes PIO works):
88 * + DaVinci, basically works with cppi dma
89 * + OMAP 2430, ditto with mentor dma
90 * + TUSB 6010, platform-specific dma in the works
93 /* ----------------------------------------------------------------------- */
95 /* Maps the buffer to dma */
97 static inline void map_dma_buffer(struct musb_request *request,
98 struct musb *musb)
100 if (request->request.dma == DMA_ADDR_INVALID) {
101 request->request.dma = dma_map_single(
102 musb->controller,
103 request->request.buf,
104 request->request.length,
105 request->tx
106 ? DMA_TO_DEVICE
107 : DMA_FROM_DEVICE);
108 request->mapped = 1;
109 } else {
110 dma_sync_single_for_device(musb->controller,
111 request->request.dma,
112 request->request.length,
113 request->tx
114 ? DMA_TO_DEVICE
115 : DMA_FROM_DEVICE);
116 request->mapped = 0;
120 /* Unmap the buffer from dma and maps it back to cpu */
121 static inline void unmap_dma_buffer(struct musb_request *request,
122 struct musb *musb)
124 if (request->request.dma == DMA_ADDR_INVALID) {
125 DBG(20, "not unmapping a never mapped buffer\n");
126 return;
128 if (request->mapped) {
129 dma_unmap_single(musb->controller,
130 request->request.dma,
131 request->request.length,
132 request->tx
133 ? DMA_TO_DEVICE
134 : DMA_FROM_DEVICE);
135 request->request.dma = DMA_ADDR_INVALID;
136 request->mapped = 0;
137 } else {
138 dma_sync_single_for_cpu(musb->controller,
139 request->request.dma,
140 request->request.length,
141 request->tx
142 ? DMA_TO_DEVICE
143 : DMA_FROM_DEVICE);
149 * Immediately complete a request.
151 * @param request the request to complete
152 * @param status the status to complete the request with
153 * Context: controller locked, IRQs blocked.
155 void musb_g_giveback(
156 struct musb_ep *ep,
157 struct usb_request *request,
158 int status)
159 __releases(ep->musb->lock)
160 __acquires(ep->musb->lock)
162 struct musb_request *req;
163 struct musb *musb;
164 int busy = ep->busy;
166 req = to_musb_request(request);
168 list_del(&request->list);
169 if (req->request.status == -EINPROGRESS)
170 req->request.status = status;
171 musb = req->musb;
173 ep->busy = 1;
174 spin_unlock(&musb->lock);
175 if (is_dma_capable() && ep->dma)
176 unmap_dma_buffer(req, musb);
177 if (request->status == 0)
178 DBG(5, "%s done request %p, %d/%d\n",
179 ep->end_point.name, request,
180 req->request.actual, req->request.length);
181 else
182 DBG(2, "%s request %p, %d/%d fault %d\n",
183 ep->end_point.name, request,
184 req->request.actual, req->request.length,
185 request->status);
186 req->request.complete(&req->ep->end_point, &req->request);
187 spin_lock(&musb->lock);
188 ep->busy = busy;
191 /* ----------------------------------------------------------------------- */
194 * Abort requests queued to an endpoint, completing each with the given status. Synchronous.
195 * Caller has locked the controller, blocked IRQs, and selected this ep.
197 static void nuke(struct musb_ep *ep, const int status)
199 struct musb_request *req = NULL;
200 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
202 ep->busy = 1;
204 if (is_dma_capable() && ep->dma) {
205 struct dma_controller *c = ep->musb->dma_controller;
206 int value;
208 if (ep->is_in) {
210 * The programming guide says that we must not clear
211 * the DMAMODE bit before DMAENAB, so we only
212 * clear it in the second write...
214 musb_writew(epio, MUSB_TXCSR,
215 MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
216 musb_writew(epio, MUSB_TXCSR,
217 0 | MUSB_TXCSR_FLUSHFIFO);
218 } else {
219 musb_writew(epio, MUSB_RXCSR,
220 0 | MUSB_RXCSR_FLUSHFIFO);
221 musb_writew(epio, MUSB_RXCSR,
222 0 | MUSB_RXCSR_FLUSHFIFO);
225 value = c->channel_abort(ep->dma);
226 DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
227 c->channel_release(ep->dma);
228 ep->dma = NULL;
231 while (!list_empty(&(ep->req_list))) {
232 req = container_of(ep->req_list.next, struct musb_request,
233 request.list);
234 musb_g_giveback(ep, &req->request, status);
238 /* ----------------------------------------------------------------------- */
240 /* Data transfers - pure PIO, pure DMA, or mixed mode */
243 * This assumes the separate CPPI engine is responding to DMA requests
244 * from the usb core ... sequenced a bit differently from mentor dma.
247 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
249 if (can_bulk_split(musb, ep->type))
250 return ep->hw_ep->max_packet_sz_tx;
251 else
252 return ep->packet_sz;
256 #ifdef CONFIG_USB_INVENTRA_DMA
258 /* Peripheral tx (IN) using Mentor DMA works as follows:
259 Only mode 0 is used for transfers <= wPktSize,
260 mode 1 is used for larger transfers,
262 One of the following happens:
263 - Host sends IN token which causes an endpoint interrupt
264 -> TxAvail
265 -> if DMA is currently busy, exit.
266 -> if queue is non-empty, txstate().
268 - Request is queued by the gadget driver.
269 -> if queue was previously empty, txstate()
271 txstate()
272 -> start
273 /\ -> setup DMA
274 | (data is transferred to the FIFO, then sent out when
275 | IN token(s) are received from the host)
276 | -> DMA interrupt on completion
277 | calls TxAvail.
278 | -> stop DMA, ~DMAENAB,
279 | -> set TxPktRdy for last short pkt or zlp
280 | -> Complete Request
281 | -> Continue next request (call txstate)
282 |___________________________________|
284 * Non-Mentor DMA engines can of course work differently, such as by
285 * upleveling from irq-per-packet to irq-per-buffer.
288 #endif
291 * An endpoint is transmitting data. This can be called either from
292 * the IRQ routine or from ep.queue() to kickstart a request on an
293 * endpoint.
295 * Context: controller locked, IRQs blocked, endpoint selected
297 static void txstate(struct musb *musb, struct musb_request *req)
299 u8 epnum = req->epnum;
300 struct musb_ep *musb_ep;
301 void __iomem *epio = musb->endpoints[epnum].regs;
302 struct usb_request *request;
303 u16 fifo_count = 0, csr;
304 int use_dma = 0;
306 musb_ep = req->ep;
308 /* we shouldn't get here while DMA is active ... but we do ... */
309 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
310 DBG(4, "dma pending...\n");
311 return;
314 /* read TXCSR before */
315 csr = musb_readw(epio, MUSB_TXCSR);
317 request = &req->request;
318 fifo_count = min(max_ep_writesize(musb, musb_ep),
319 (int)(request->length - request->actual));
321 if (csr & MUSB_TXCSR_TXPKTRDY) {
322 DBG(5, "%s old packet still ready, txcsr %03x\n",
323 musb_ep->end_point.name, csr);
324 return;
327 if (csr & MUSB_TXCSR_P_SENDSTALL) {
328 DBG(5, "%s stalling, txcsr %03x\n",
329 musb_ep->end_point.name, csr);
330 return;
333 DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
334 epnum, musb_ep->packet_sz, fifo_count,
335 csr);
337 #ifndef CONFIG_MUSB_PIO_ONLY
338 if (is_dma_capable() && musb_ep->dma) {
339 struct dma_controller *c = musb->dma_controller;
340 size_t request_size;
342 /* setup DMA, then program endpoint CSR */
343 request_size = min_t(size_t, request->length - request->actual,
344 musb_ep->dma->max_len);
346 use_dma = (request->dma != DMA_ADDR_INVALID);
348 /* MUSB_TXCSR_P_ISO is still set correctly */
350 #ifdef CONFIG_USB_INVENTRA_DMA
352 if (request_size < musb_ep->packet_sz)
353 musb_ep->dma->desired_mode = 0;
354 else
355 musb_ep->dma->desired_mode = 1;
357 use_dma = use_dma && c->channel_program(
358 musb_ep->dma, musb_ep->packet_sz,
359 musb_ep->dma->desired_mode,
360 request->dma + request->actual, request_size);
361 if (use_dma) {
362 if (musb_ep->dma->desired_mode == 0) {
364 * We must not clear the DMAMODE bit
365 * before the DMAENAB bit -- and the
366 * latter doesn't always get cleared
367 * before we get here...
369 csr &= ~(MUSB_TXCSR_AUTOSET
370 | MUSB_TXCSR_DMAENAB);
371 musb_writew(epio, MUSB_TXCSR, csr
372 | MUSB_TXCSR_P_WZC_BITS);
373 csr &= ~MUSB_TXCSR_DMAMODE;
374 csr |= (MUSB_TXCSR_DMAENAB |
375 MUSB_TXCSR_MODE);
376 /* against programming guide */
377 } else {
378 csr |= (MUSB_TXCSR_DMAENAB
379 | MUSB_TXCSR_DMAMODE
380 | MUSB_TXCSR_MODE);
381 if (!musb_ep->hb_mult)
382 csr |= MUSB_TXCSR_AUTOSET;
384 csr &= ~MUSB_TXCSR_P_UNDERRUN;
386 musb_writew(epio, MUSB_TXCSR, csr);
390 #elif defined(CONFIG_USB_TI_CPPI_DMA)
391 /* program endpoint CSR first, then setup DMA */
392 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
393 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
394 MUSB_TXCSR_MODE;
395 musb_writew(epio, MUSB_TXCSR,
396 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
397 | csr);
399 /* ensure writebuffer is empty */
400 csr = musb_readw(epio, MUSB_TXCSR);
402 /* NOTE host side sets DMAENAB later than this; both are
403 * OK since the transfer dma glue (between CPPI and Mentor
404 * fifos) just tells CPPI it could start. Data only moves
405 * to the USB TX fifo when both fifos are ready.
408 /* "mode" is irrelevant here; handle terminating ZLPs like
409 * PIO does, since the hardware RNDIS mode seems unreliable
410 * except for the last-packet-is-already-short case.
412 use_dma = use_dma && c->channel_program(
413 musb_ep->dma, musb_ep->packet_sz,
415 request->dma + request->actual,
416 request_size);
417 if (!use_dma) {
418 c->channel_release(musb_ep->dma);
419 musb_ep->dma = NULL;
420 csr &= ~MUSB_TXCSR_DMAENAB;
421 musb_writew(epio, MUSB_TXCSR, csr);
422 /* invariant: request->buf is non-null */
424 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
425 use_dma = use_dma && c->channel_program(
426 musb_ep->dma, musb_ep->packet_sz,
427 request->zero,
428 request->dma + request->actual,
429 request_size);
430 #endif
432 #endif
434 if (!use_dma) {
436 * Unmap the dma buffer back to cpu if dma channel
437 * programming fails
439 if (is_dma_capable() && musb_ep->dma)
440 unmap_dma_buffer(req, musb);
442 musb_write_fifo(musb_ep->hw_ep, fifo_count,
443 (u8 *) (request->buf + request->actual));
444 request->actual += fifo_count;
445 csr |= MUSB_TXCSR_TXPKTRDY;
446 csr &= ~MUSB_TXCSR_P_UNDERRUN;
447 musb_writew(epio, MUSB_TXCSR, csr);
450 /* host may already have the data when this message shows... */
451 DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
452 musb_ep->end_point.name, use_dma ? "dma" : "pio",
453 request->actual, request->length,
454 musb_readw(epio, MUSB_TXCSR),
455 fifo_count,
456 musb_readw(epio, MUSB_TXMAXP));
460 * FIFO state update (e.g. data ready).
461 * Called from IRQ, with controller locked.
463 void musb_g_tx(struct musb *musb, u8 epnum)
465 u16 csr;
466 struct usb_request *request;
467 u8 __iomem *mbase = musb->mregs;
468 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
469 void __iomem *epio = musb->endpoints[epnum].regs;
470 struct dma_channel *dma;
472 musb_ep_select(mbase, epnum);
473 request = next_request(musb_ep);
475 csr = musb_readw(epio, MUSB_TXCSR);
476 DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
478 dma = is_dma_capable() ? musb_ep->dma : NULL;
481 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
482 * probably rates reporting as a host error.
484 if (csr & MUSB_TXCSR_P_SENTSTALL) {
485 csr |= MUSB_TXCSR_P_WZC_BITS;
486 csr &= ~MUSB_TXCSR_P_SENTSTALL;
487 musb_writew(epio, MUSB_TXCSR, csr);
488 return;
491 if (csr & MUSB_TXCSR_P_UNDERRUN) {
492 /* We NAKed, no big deal... little reason to care. */
493 csr |= MUSB_TXCSR_P_WZC_BITS;
494 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
495 musb_writew(epio, MUSB_TXCSR, csr);
496 DBG(20, "underrun on ep%d, req %p\n", epnum, request);
499 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
501 * SHOULD NOT HAPPEN... has with CPPI though, after
502 * changing SENDSTALL (and other cases); harmless?
504 DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
505 return;
508 if (request) {
509 u8 is_dma = 0;
511 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
512 is_dma = 1;
513 csr |= MUSB_TXCSR_P_WZC_BITS;
514 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
515 MUSB_TXCSR_TXPKTRDY);
516 musb_writew(epio, MUSB_TXCSR, csr);
517 /* Ensure writebuffer is empty. */
518 csr = musb_readw(epio, MUSB_TXCSR);
519 request->actual += musb_ep->dma->actual_len;
520 DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
521 epnum, csr, musb_ep->dma->actual_len, request);
525 * First, maybe a terminating short packet. Some DMA
526 * engines might handle this by themselves.
528 if ((request->zero && request->length
529 && (request->length % musb_ep->packet_sz == 0)
530 && (request->actual == request->length))
531 #ifdef CONFIG_USB_INVENTRA_DMA
532 || (is_dma && (!dma->desired_mode ||
533 (request->actual &
534 (musb_ep->packet_sz - 1))))
535 #endif
538 * On DMA completion, FIFO may not be
539 * available yet...
541 if (csr & MUSB_TXCSR_TXPKTRDY)
542 return;
544 DBG(4, "sending zero pkt\n");
545 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
546 | MUSB_TXCSR_TXPKTRDY);
547 request->zero = 0;
550 if (request->actual == request->length) {
551 musb_g_giveback(musb_ep, request, 0);
552 request = musb_ep->desc ? next_request(musb_ep) : NULL;
553 if (!request) {
554 DBG(4, "%s idle now\n",
555 musb_ep->end_point.name);
556 return;
560 txstate(musb, to_musb_request(request));
564 /* ------------------------------------------------------------ */
566 #ifdef CONFIG_USB_INVENTRA_DMA
568 /* Peripheral rx (OUT) using Mentor DMA works as follows:
569 - Only mode 0 is used.
571 - Request is queued by the gadget class driver.
572 -> if queue was previously empty, rxstate()
574 - Host sends OUT token which causes an endpoint interrupt
575 /\ -> RxReady
576 | -> if request queued, call rxstate
577 | /\ -> setup DMA
578 | | -> DMA interrupt on completion
579 | | -> RxReady
580 | | -> stop DMA
581 | | -> ack the read
582 | | -> if data recd = max expected
583 | | by the request, or host
584 | | sent a short packet,
585 | | complete the request,
586 | | and start the next one.
587 | |_____________________________________|
588 | else just wait for the host
589 | to send the next OUT token.
590 |__________________________________________________|
592 * Non-Mentor DMA engines can of course work differently.
595 #endif
598 * Context: controller locked, IRQs blocked, endpoint selected
600 static void rxstate(struct musb *musb, struct musb_request *req)
602 const u8 epnum = req->epnum;
603 struct usb_request *request = &req->request;
604 struct musb_ep *musb_ep;
605 void __iomem *epio = musb->endpoints[epnum].regs;
606 unsigned fifo_count = 0;
607 u16 len;
608 u16 csr = musb_readw(epio, MUSB_RXCSR);
609 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
611 if (hw_ep->is_shared_fifo)
612 musb_ep = &hw_ep->ep_in;
613 else
614 musb_ep = &hw_ep->ep_out;
616 len = musb_ep->packet_sz;
618 /* We shouldn't get here while DMA is active, but we do... */
619 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
620 DBG(4, "DMA pending...\n");
621 return;
624 if (csr & MUSB_RXCSR_P_SENDSTALL) {
625 DBG(5, "%s stalling, RXCSR %04x\n",
626 musb_ep->end_point.name, csr);
627 return;
630 if (is_cppi_enabled() && musb_ep->dma) {
631 struct dma_controller *c = musb->dma_controller;
632 struct dma_channel *channel = musb_ep->dma;
634 /* NOTE: CPPI won't actually stop advancing the DMA
635 * queue after short packet transfers, so this is almost
636 * always going to run as IRQ-per-packet DMA so that
637 * faults will be handled correctly.
639 if (c->channel_program(channel,
640 musb_ep->packet_sz,
641 !request->short_not_ok,
642 request->dma + request->actual,
643 request->length - request->actual)) {
645 /* make sure that if an rxpkt arrived after the irq,
646 * the cppi engine will be ready to take it as soon
647 * as DMA is enabled
649 csr &= ~(MUSB_RXCSR_AUTOCLEAR
650 | MUSB_RXCSR_DMAMODE);
651 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
652 musb_writew(epio, MUSB_RXCSR, csr);
653 return;
657 if (csr & MUSB_RXCSR_RXPKTRDY) {
658 len = musb_readw(epio, MUSB_RXCOUNT);
659 if (request->actual < request->length) {
660 #ifdef CONFIG_USB_INVENTRA_DMA
661 if (is_dma_capable() && musb_ep->dma) {
662 struct dma_controller *c;
663 struct dma_channel *channel;
664 int use_dma = 0;
666 c = musb->dma_controller;
667 channel = musb_ep->dma;
669 /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
670 * mode 0 only. So we do not get endpoint interrupts due to DMA
671 * completion. We only get interrupts from DMA controller.
673 * We could operate in DMA mode 1 if we knew the size of the transfer
674 * in advance. For mass storage class, request->length = what the host
675 * sends, so that'd work. But for pretty much everything else,
676 * request->length is routinely more than what the host sends. For
677 * most of these gadgets, the end of the transfer is signified either by a
678 * short packet, or by filling the last byte of the buffer. (Sending extra
679 * data in that last packet should trigger an overflow fault.) But in mode 1,
680 * we don't get a DMA completion interrupt for short packets.
682 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
683 * to get endpoint interrupt on every DMA req, but that didn't seem
684 * to work reliably.
686 * REVISIT an updated g_file_storage can set req->short_not_ok, which
687 * then becomes usable as a runtime "use mode 1" hint...
690 csr |= MUSB_RXCSR_DMAENAB;
691 #ifdef USE_MODE1
692 csr |= MUSB_RXCSR_AUTOCLEAR;
693 /* csr |= MUSB_RXCSR_DMAMODE; */
695 /* this special sequence (enabling and then
696 * disabling MUSB_RXCSR_DMAMODE) is required
697 * to get DMAReq to activate
699 musb_writew(epio, MUSB_RXCSR,
700 csr | MUSB_RXCSR_DMAMODE);
701 #else
702 if (!musb_ep->hb_mult &&
703 musb_ep->hw_ep->rx_double_buffered)
704 csr |= MUSB_RXCSR_AUTOCLEAR;
705 #endif
706 musb_writew(epio, MUSB_RXCSR, csr);
708 if (request->actual < request->length) {
709 int transfer_size = 0;
710 #ifdef USE_MODE1
711 transfer_size = min(request->length - request->actual,
712 channel->max_len);
713 #else
714 transfer_size = min(request->length - request->actual,
715 (unsigned)len);
716 #endif
717 if (transfer_size <= musb_ep->packet_sz)
718 musb_ep->dma->desired_mode = 0;
719 else
720 musb_ep->dma->desired_mode = 1;
722 use_dma = c->channel_program(
723 channel,
724 musb_ep->packet_sz,
725 channel->desired_mode,
726 request->dma
727 + request->actual,
728 transfer_size);
731 if (use_dma)
732 return;
734 #endif /* Mentor's DMA */
736 fifo_count = request->length - request->actual;
737 DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
738 musb_ep->end_point.name,
739 len, fifo_count,
740 musb_ep->packet_sz);
742 fifo_count = min_t(unsigned, len, fifo_count);
744 #ifdef CONFIG_USB_TUSB_OMAP_DMA
745 if (tusb_dma_omap() && musb_ep->dma) {
746 struct dma_controller *c = musb->dma_controller;
747 struct dma_channel *channel = musb_ep->dma;
748 u32 dma_addr = request->dma + request->actual;
749 int ret;
751 ret = c->channel_program(channel,
752 musb_ep->packet_sz,
753 channel->desired_mode,
754 dma_addr,
755 fifo_count);
756 if (ret)
757 return;
759 #endif
761 * Unmap the dma buffer back to cpu if dma channel
762 * programming fails. This buffer is mapped if the
763 * channel allocation is successful
765 if (is_dma_capable() && musb_ep->dma) {
766 unmap_dma_buffer(req, musb);
769 * Clear DMAENAB and AUTOCLEAR for the
770 * PIO mode transfer
772 csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
773 musb_writew(epio, MUSB_RXCSR, csr);
776 musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
777 (request->buf + request->actual));
778 request->actual += fifo_count;
780 /* REVISIT if we left anything in the fifo, flush
781 * it and report -EOVERFLOW
784 /* ack the read! */
785 csr |= MUSB_RXCSR_P_WZC_BITS;
786 csr &= ~MUSB_RXCSR_RXPKTRDY;
787 musb_writew(epio, MUSB_RXCSR, csr);
791 /* reached the end, or a short packet was detected */
792 if (request->actual == request->length || len < musb_ep->packet_sz)
793 musb_g_giveback(musb_ep, request, 0);
797 * Data ready for a request; called from IRQ
799 void musb_g_rx(struct musb *musb, u8 epnum)
801 u16 csr;
802 struct usb_request *request;
803 void __iomem *mbase = musb->mregs;
804 struct musb_ep *musb_ep;
805 void __iomem *epio = musb->endpoints[epnum].regs;
806 struct dma_channel *dma;
807 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
809 if (hw_ep->is_shared_fifo)
810 musb_ep = &hw_ep->ep_in;
811 else
812 musb_ep = &hw_ep->ep_out;
814 musb_ep_select(mbase, epnum);
816 request = next_request(musb_ep);
817 if (!request)
818 return;
820 csr = musb_readw(epio, MUSB_RXCSR);
821 dma = is_dma_capable() ? musb_ep->dma : NULL;
823 DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
824 csr, dma ? " (dma)" : "", request);
826 if (csr & MUSB_RXCSR_P_SENTSTALL) {
827 csr |= MUSB_RXCSR_P_WZC_BITS;
828 csr &= ~MUSB_RXCSR_P_SENTSTALL;
829 musb_writew(epio, MUSB_RXCSR, csr);
830 return;
833 if (csr & MUSB_RXCSR_P_OVERRUN) {
834 /* csr |= MUSB_RXCSR_P_WZC_BITS; */
835 csr &= ~MUSB_RXCSR_P_OVERRUN;
836 musb_writew(epio, MUSB_RXCSR, csr);
838 DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
839 if (request->status == -EINPROGRESS)
840 request->status = -EOVERFLOW;
842 if (csr & MUSB_RXCSR_INCOMPRX) {
843 /* REVISIT not necessarily an error */
844 DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
847 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
848 /* "should not happen"; likely RXPKTRDY pending for DMA */
849 DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
850 "%s busy, csr %04x\n",
851 musb_ep->end_point.name, csr);
852 return;
855 if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
856 csr &= ~(MUSB_RXCSR_AUTOCLEAR
857 | MUSB_RXCSR_DMAENAB
858 | MUSB_RXCSR_DMAMODE);
859 musb_writew(epio, MUSB_RXCSR,
860 MUSB_RXCSR_P_WZC_BITS | csr);
862 request->actual += musb_ep->dma->actual_len;
864 DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
865 epnum, csr,
866 musb_readw(epio, MUSB_RXCSR),
867 musb_ep->dma->actual_len, request);
869 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
870 /* Autoclear doesn't clear RxPktRdy for short packets */
871 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
872 || (dma->actual_len
873 & (musb_ep->packet_sz - 1))) {
874 /* ack the read! */
875 csr &= ~MUSB_RXCSR_RXPKTRDY;
876 musb_writew(epio, MUSB_RXCSR, csr);
879 /* incomplete, and not short? wait for next IN packet */
880 if ((request->actual < request->length)
881 && (musb_ep->dma->actual_len
882 == musb_ep->packet_sz)) {
883 /* In double buffer case, continue to unload fifo if
884 * there is Rx packet in FIFO.
886 csr = musb_readw(epio, MUSB_RXCSR);
887 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
888 hw_ep->rx_double_buffered)
889 goto exit;
890 return;
892 #endif
893 musb_g_giveback(musb_ep, request, 0);
895 request = next_request(musb_ep);
896 if (!request)
897 return;
899 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
900 exit:
901 #endif
902 /* Analyze request */
903 rxstate(musb, to_musb_request(request));
906 /* ------------------------------------------------------------ */
908 static int musb_gadget_enable(struct usb_ep *ep,
909 const struct usb_endpoint_descriptor *desc)
911 unsigned long flags;
912 struct musb_ep *musb_ep;
913 struct musb_hw_ep *hw_ep;
914 void __iomem *regs;
915 struct musb *musb;
916 void __iomem *mbase;
917 u8 epnum;
918 u16 csr;
919 unsigned tmp;
920 int status = -EINVAL;
922 if (!ep || !desc)
923 return -EINVAL;
925 musb_ep = to_musb_ep(ep);
926 hw_ep = musb_ep->hw_ep;
927 regs = hw_ep->regs;
928 musb = musb_ep->musb;
929 mbase = musb->mregs;
930 epnum = musb_ep->current_epnum;
932 spin_lock_irqsave(&musb->lock, flags);
934 if (musb_ep->desc) {
935 status = -EBUSY;
936 goto fail;
938 musb_ep->type = usb_endpoint_type(desc);
940 /* check direction and (later) maxpacket size against endpoint */
941 if (usb_endpoint_num(desc) != epnum)
942 goto fail;
944 /* REVISIT this rules out high bandwidth periodic transfers */
945 tmp = le16_to_cpu(desc->wMaxPacketSize);
946 if (tmp & ~0x07ff) {
947 int ok;
949 if (usb_endpoint_dir_in(desc))
950 ok = musb->hb_iso_tx;
951 else
952 ok = musb->hb_iso_rx;
954 if (!ok) {
955 DBG(4, "%s: high bandwidth ISO not supported\n", __func__);
956 goto fail;
958 musb_ep->hb_mult = (tmp >> 11) & 3;
959 } else {
960 musb_ep->hb_mult = 0;
963 musb_ep->packet_sz = tmp & 0x7ff;
964 tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
966 /* enable the interrupts for the endpoint, set the endpoint
967 * packet size (or fail), set the mode, clear the fifo
969 musb_ep_select(mbase, epnum);
970 if (usb_endpoint_dir_in(desc)) {
971 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
973 if (hw_ep->is_shared_fifo)
974 musb_ep->is_in = 1;
975 if (!musb_ep->is_in)
976 goto fail;
978 if (tmp > hw_ep->max_packet_sz_tx) {
979 DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
980 goto fail;
983 int_txe |= (1 << epnum);
984 musb_writew(mbase, MUSB_INTRTXE, int_txe);
986 /* REVISIT if can_bulk_split(), use by updating "tmp";
987 * likewise high bandwidth periodic tx
989 /* Set TXMAXP with the FIFO size of the endpoint
990 * to disable double buffering mode.
992 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
994 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
995 if (musb_readw(regs, MUSB_TXCSR)
996 & MUSB_TXCSR_FIFONOTEMPTY)
997 csr |= MUSB_TXCSR_FLUSHFIFO;
998 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
999 csr |= MUSB_TXCSR_P_ISO;
1001 /* set twice in case of double buffering */
1002 musb_writew(regs, MUSB_TXCSR, csr);
1003 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1004 musb_writew(regs, MUSB_TXCSR, csr);
1006 } else {
1007 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1009 if (hw_ep->is_shared_fifo)
1010 musb_ep->is_in = 0;
1011 if (musb_ep->is_in)
1012 goto fail;
1014 if (tmp > hw_ep->max_packet_sz_rx) {
1015 DBG(4, "%s: packet size beyond hw fifo size\n", __func__);
1016 goto fail;
1019 int_rxe |= (1 << epnum);
1020 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1022 /* REVISIT if can_bulk_combine() use by updating "tmp"
1023 * likewise high bandwidth periodic rx
1025 /* Set RXMAXP with the FIFO size of the endpoint
1026 * to disable double buffering mode.
1028 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
1030 /* force shared fifo to OUT-only mode */
1031 if (hw_ep->is_shared_fifo) {
1032 csr = musb_readw(regs, MUSB_TXCSR);
1033 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1034 musb_writew(regs, MUSB_TXCSR, csr);
1037 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1038 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1039 csr |= MUSB_RXCSR_P_ISO;
1040 else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1041 csr |= MUSB_RXCSR_DISNYET;
1043 /* set twice in case of double buffering */
1044 musb_writew(regs, MUSB_RXCSR, csr);
1045 musb_writew(regs, MUSB_RXCSR, csr);
1048 /* NOTE: all the I/O code _should_ work fine without DMA, in case
1049 * for some reason you run out of channels here.
1051 if (is_dma_capable() && musb->dma_controller) {
1052 struct dma_controller *c = musb->dma_controller;
1054 musb_ep->dma = c->channel_alloc(c, hw_ep,
1055 (desc->bEndpointAddress & USB_DIR_IN));
1056 } else
1057 musb_ep->dma = NULL;
1059 musb_ep->desc = desc;
1060 musb_ep->busy = 0;
1061 musb_ep->wedged = 0;
1062 status = 0;
1064 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1065 musb_driver_name, musb_ep->end_point.name,
1066 ({ char *s; switch (musb_ep->type) {
1067 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
1068 case USB_ENDPOINT_XFER_INT: s = "int"; break;
1069 default: s = "iso"; break;
1070 }; s; }),
1071 musb_ep->is_in ? "IN" : "OUT",
1072 musb_ep->dma ? "dma, " : "",
1073 musb_ep->packet_sz);
1075 schedule_work(&musb->irq_work);
1077 fail:
1078 spin_unlock_irqrestore(&musb->lock, flags);
1079 return status;
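/*
 * Illustrative sketch, not part of this driver: a function driver enables
 * one of these endpoints from its set_alt() handling using the
 * two-argument usb_ep_enable() of this kernel generation, which lands in
 * musb_gadget_enable().  my_ep and my_desc are hypothetical placeholders.
 */
#if 0
	status = usb_ep_enable(my_ep, &my_desc);	/* -> musb_gadget_enable */
	if (status < 0)
		return status;
	/* ... traffic ... */
	usb_ep_disable(my_ep);				/* -> musb_gadget_disable */
#endif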
1083 * Disable an endpoint, flushing all queued requests.
1085 static int musb_gadget_disable(struct usb_ep *ep)
1087 unsigned long flags;
1088 struct musb *musb;
1089 u8 epnum;
1090 struct musb_ep *musb_ep;
1091 void __iomem *epio;
1092 int status = 0;
1094 musb_ep = to_musb_ep(ep);
1095 musb = musb_ep->musb;
1096 epnum = musb_ep->current_epnum;
1097 epio = musb->endpoints[epnum].regs;
1099 spin_lock_irqsave(&musb->lock, flags);
1100 musb_ep_select(musb->mregs, epnum);
1102 /* zero the endpoint sizes */
1103 if (musb_ep->is_in) {
1104 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1105 int_txe &= ~(1 << epnum);
1106 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1107 musb_writew(epio, MUSB_TXMAXP, 0);
1108 } else {
1109 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1110 int_rxe &= ~(1 << epnum);
1111 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1112 musb_writew(epio, MUSB_RXMAXP, 0);
1115 musb_ep->desc = NULL;
1117 /* abort all pending DMA and requests */
1118 nuke(musb_ep, -ESHUTDOWN);
1120 schedule_work(&musb->irq_work);
1122 spin_unlock_irqrestore(&(musb->lock), flags);
1124 DBG(2, "%s\n", musb_ep->end_point.name);
1126 return status;
1130 * Allocate a request for an endpoint.
1131 * Reused by ep0 code.
1133 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1135 struct musb_ep *musb_ep = to_musb_ep(ep);
1136 struct musb_request *request = NULL;
1138 request = kzalloc(sizeof *request, gfp_flags);
1139 if (!request) {
1140 DBG(4, "not enough memory\n");
1141 return NULL;
1144 INIT_LIST_HEAD(&request->request.list);
1145 request->request.dma = DMA_ADDR_INVALID;
1146 request->epnum = musb_ep->current_epnum;
1147 request->ep = musb_ep;
1149 return &request->request;
1153 * Free a request
1154 * Reused by ep0 code.
1156 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1158 kfree(to_musb_request(req));
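/*
 * Illustrative sketch, not part of this driver: how a gadget function
 * driver typically reaches musb_alloc_request()/musb_gadget_queue()
 * through the generic usb_ep API.  The names my_ep, my_buf, my_len and
 * my_complete are hypothetical placeholders.
 */
#if 0
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual are valid here; the request may now
	 * be reused or released with usb_ep_free_request().
	 */
}

static int my_submit(struct usb_ep *my_ep, void *my_buf, unsigned my_len)
{
	struct usb_request *req;

	/* usb_ep_alloc_request() ends up in musb_alloc_request() */
	req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->buf = my_buf;
	req->length = my_len;
	req->complete = my_complete;
	/* usb_ep_queue() ends up in musb_gadget_queue() */
	return usb_ep_queue(my_ep, req, GFP_ATOMIC);
}
#endif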
1161 static LIST_HEAD(buffers);
1163 struct free_record {
1164 struct list_head list;
1165 struct device *dev;
1166 unsigned bytes;
1167 dma_addr_t dma;
1171 * Context: controller locked, IRQs blocked.
1173 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1175 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1176 req->tx ? "TX/IN" : "RX/OUT",
1177 &req->request, req->request.length, req->epnum);
1179 musb_ep_select(musb->mregs, req->epnum);
1180 if (req->tx)
1181 txstate(musb, req);
1182 else
1183 rxstate(musb, req);
1186 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1187 gfp_t gfp_flags)
1189 struct musb_ep *musb_ep;
1190 struct musb_request *request;
1191 struct musb *musb;
1192 int status = 0;
1193 unsigned long lockflags;
1195 if (!ep || !req)
1196 return -EINVAL;
1197 if (!req->buf)
1198 return -ENODATA;
1200 musb_ep = to_musb_ep(ep);
1201 musb = musb_ep->musb;
1203 request = to_musb_request(req);
1204 request->musb = musb;
1206 if (request->ep != musb_ep)
1207 return -EINVAL;
1209 DBG(4, "<== to %s request=%p\n", ep->name, req);
1211 /* request is mine now... */
1212 request->request.actual = 0;
1213 request->request.status = -EINPROGRESS;
1214 request->epnum = musb_ep->current_epnum;
1215 request->tx = musb_ep->is_in;
1217 if (is_dma_capable() && musb_ep->dma)
1218 map_dma_buffer(request, musb);
1219 else
1220 request->mapped = 0;
1222 spin_lock_irqsave(&musb->lock, lockflags);
1224 /* don't queue if the ep is down */
1225 if (!musb_ep->desc) {
1226 DBG(4, "req %p queued to %s while ep %s\n",
1227 req, ep->name, "disabled");
1228 status = -ESHUTDOWN;
1229 goto cleanup;
1232 /* add request to the list */
1233 list_add_tail(&(request->request.list), &(musb_ep->req_list));
1235 /* if this is the head of the queue, start i/o ... */
1236 if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
1237 musb_ep_restart(musb, request);
1239 cleanup:
1240 spin_unlock_irqrestore(&musb->lock, lockflags);
1241 return status;
1244 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1246 struct musb_ep *musb_ep = to_musb_ep(ep);
1247 struct usb_request *r;
1248 unsigned long flags;
1249 int status = 0;
1250 struct musb *musb = musb_ep->musb;
1252 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1253 return -EINVAL;
1255 spin_lock_irqsave(&musb->lock, flags);
1257 list_for_each_entry(r, &musb_ep->req_list, list) {
1258 if (r == request)
1259 break;
1261 if (r != request) {
1262 DBG(3, "request %p not queued to %s\n", request, ep->name);
1263 status = -EINVAL;
1264 goto done;
1267 /* if the hardware doesn't have the request, easy ... */
1268 if (musb_ep->req_list.next != &request->list || musb_ep->busy)
1269 musb_g_giveback(musb_ep, request, -ECONNRESET);
1271 /* ... else abort the dma transfer ... */
1272 else if (is_dma_capable() && musb_ep->dma) {
1273 struct dma_controller *c = musb->dma_controller;
1275 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1276 if (c->channel_abort)
1277 status = c->channel_abort(musb_ep->dma);
1278 else
1279 status = -EBUSY;
1280 if (status == 0)
1281 musb_g_giveback(musb_ep, request, -ECONNRESET);
1282 } else {
1283 /* NOTE: by sticking to easily tested hardware/driver states,
1284 * we leave counting of in-flight packets imprecise.
1286 musb_g_giveback(musb_ep, request, -ECONNRESET);
1289 done:
1290 spin_unlock_irqrestore(&musb->lock, flags);
1291 return status;
1295 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1296 * data but will queue requests.
1298 * exported to ep0 code
1300 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1302 struct musb_ep *musb_ep = to_musb_ep(ep);
1303 u8 epnum = musb_ep->current_epnum;
1304 struct musb *musb = musb_ep->musb;
1305 void __iomem *epio = musb->endpoints[epnum].regs;
1306 void __iomem *mbase;
1307 unsigned long flags;
1308 u16 csr;
1309 struct musb_request *request;
1310 int status = 0;
1312 if (!ep)
1313 return -EINVAL;
1314 mbase = musb->mregs;
1316 spin_lock_irqsave(&musb->lock, flags);
1318 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1319 status = -EINVAL;
1320 goto done;
1323 musb_ep_select(mbase, epnum);
1325 request = to_musb_request(next_request(musb_ep));
1326 if (value) {
1327 if (request) {
1328 DBG(3, "request in progress, cannot halt %s\n",
1329 ep->name);
1330 status = -EAGAIN;
1331 goto done;
1333 /* Cannot portably stall with non-empty FIFO */
1334 if (musb_ep->is_in) {
1335 csr = musb_readw(epio, MUSB_TXCSR);
1336 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1337 DBG(3, "FIFO busy, cannot halt %s\n", ep->name);
1338 status = -EAGAIN;
1339 goto done;
1342 } else
1343 musb_ep->wedged = 0;
1345 /* set/clear the stall and toggle bits */
1346 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1347 if (musb_ep->is_in) {
1348 csr = musb_readw(epio, MUSB_TXCSR);
1349 csr |= MUSB_TXCSR_P_WZC_BITS
1350 | MUSB_TXCSR_CLRDATATOG;
1351 if (value)
1352 csr |= MUSB_TXCSR_P_SENDSTALL;
1353 else
1354 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1355 | MUSB_TXCSR_P_SENTSTALL);
1356 csr &= ~MUSB_TXCSR_TXPKTRDY;
1357 musb_writew(epio, MUSB_TXCSR, csr);
1358 } else {
1359 csr = musb_readw(epio, MUSB_RXCSR);
1360 csr |= MUSB_RXCSR_P_WZC_BITS
1361 | MUSB_RXCSR_FLUSHFIFO
1362 | MUSB_RXCSR_CLRDATATOG;
1363 if (value)
1364 csr |= MUSB_RXCSR_P_SENDSTALL;
1365 else
1366 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1367 | MUSB_RXCSR_P_SENTSTALL);
1368 musb_writew(epio, MUSB_RXCSR, csr);
1371 /* maybe start the first request in the queue */
1372 if (!musb_ep->busy && !value && request) {
1373 DBG(3, "restarting the request\n");
1374 musb_ep_restart(musb, request);
1377 done:
1378 spin_unlock_irqrestore(&musb->lock, flags);
1379 return status;
1383 * Sets the halt feature; subsequent clear-halt requests are ignored (wedge)
1385 static int musb_gadget_set_wedge(struct usb_ep *ep)
1387 struct musb_ep *musb_ep = to_musb_ep(ep);
1389 if (!ep)
1390 return -EINVAL;
1392 musb_ep->wedged = 1;
1394 return usb_ep_set_halt(ep);
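/*
 * Illustrative sketch, not part of this driver: a function driver reports
 * a protocol stall through usb_ep_set_halt(), which lands in
 * musb_gadget_set_halt(ep, 1); usb_ep_set_wedge() is used instead when the
 * halt must survive host CLEAR_FEATURE requests (as the mass storage
 * gadget does after an invalid CBW).  my_ep and the conditions are
 * hypothetical placeholders.
 */
#if 0
	if (protocol_error)
		usb_ep_set_halt(my_ep);		/* -> musb_gadget_set_halt(ep, 1) */
	else if (must_stay_halted)
		usb_ep_set_wedge(my_ep);	/* -> musb_gadget_set_wedge */
#endif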
1397 static int musb_gadget_fifo_status(struct usb_ep *ep)
1399 struct musb_ep *musb_ep = to_musb_ep(ep);
1400 void __iomem *epio = musb_ep->hw_ep->regs;
1401 int retval = -EINVAL;
1403 if (musb_ep->desc && !musb_ep->is_in) {
1404 struct musb *musb = musb_ep->musb;
1405 int epnum = musb_ep->current_epnum;
1406 void __iomem *mbase = musb->mregs;
1407 unsigned long flags;
1409 spin_lock_irqsave(&musb->lock, flags);
1411 musb_ep_select(mbase, epnum);
1412 /* FIXME return zero unless RXPKTRDY is set */
1413 retval = musb_readw(epio, MUSB_RXCOUNT);
1415 spin_unlock_irqrestore(&musb->lock, flags);
1417 return retval;
1420 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1422 struct musb_ep *musb_ep = to_musb_ep(ep);
1423 struct musb *musb = musb_ep->musb;
1424 u8 epnum = musb_ep->current_epnum;
1425 void __iomem *epio = musb->endpoints[epnum].regs;
1426 void __iomem *mbase;
1427 unsigned long flags;
1428 u16 csr, int_txe;
1430 mbase = musb->mregs;
1432 spin_lock_irqsave(&musb->lock, flags);
1433 musb_ep_select(mbase, (u8) epnum);
1435 /* disable interrupts */
1436 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1437 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1439 if (musb_ep->is_in) {
1440 csr = musb_readw(epio, MUSB_TXCSR);
1441 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1442 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1443 musb_writew(epio, MUSB_TXCSR, csr);
1444 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1445 musb_writew(epio, MUSB_TXCSR, csr);
1447 } else {
1448 csr = musb_readw(epio, MUSB_RXCSR);
1449 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1450 musb_writew(epio, MUSB_RXCSR, csr);
1451 musb_writew(epio, MUSB_RXCSR, csr);
1454 /* re-enable interrupt */
1455 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1456 spin_unlock_irqrestore(&musb->lock, flags);
1459 static const struct usb_ep_ops musb_ep_ops = {
1460 .enable = musb_gadget_enable,
1461 .disable = musb_gadget_disable,
1462 .alloc_request = musb_alloc_request,
1463 .free_request = musb_free_request,
1464 .queue = musb_gadget_queue,
1465 .dequeue = musb_gadget_dequeue,
1466 .set_halt = musb_gadget_set_halt,
1467 .set_wedge = musb_gadget_set_wedge,
1468 .fifo_status = musb_gadget_fifo_status,
1469 .fifo_flush = musb_gadget_fifo_flush
1472 /* ----------------------------------------------------------------------- */
1474 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1476 struct musb *musb = gadget_to_musb(gadget);
1478 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1481 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1483 struct musb *musb = gadget_to_musb(gadget);
1484 void __iomem *mregs = musb->mregs;
1485 unsigned long flags;
1486 int status = -EINVAL;
1487 u8 power, devctl;
1488 int retries;
1490 spin_lock_irqsave(&musb->lock, flags);
1492 switch (musb->xceiv->state) {
1493 case OTG_STATE_B_PERIPHERAL:
1494 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1495 * that's part of the standard usb 1.1 state machine, and
1496 * doesn't affect OTG transitions.
1498 if (musb->may_wakeup && musb->is_suspended)
1499 break;
1500 goto done;
1501 case OTG_STATE_B_IDLE:
1502 /* Start SRP ... OTG not required. */
1503 devctl = musb_readb(mregs, MUSB_DEVCTL);
1504 DBG(2, "Sending SRP: devctl: %02x\n", devctl);
1505 devctl |= MUSB_DEVCTL_SESSION;
1506 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1507 devctl = musb_readb(mregs, MUSB_DEVCTL);
1508 retries = 100;
1509 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1510 devctl = musb_readb(mregs, MUSB_DEVCTL);
1511 if (retries-- < 1)
1512 break;
1514 retries = 10000;
1515 while (devctl & MUSB_DEVCTL_SESSION) {
1516 devctl = musb_readb(mregs, MUSB_DEVCTL);
1517 if (retries-- < 1)
1518 break;
1521 /* Block idling for at least 1s */
1522 musb_platform_try_idle(musb,
1523 jiffies + msecs_to_jiffies(1 * HZ));
1525 status = 0;
1526 goto done;
1527 default:
1528 DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
1529 goto done;
1532 status = 0;
1534 power = musb_readb(mregs, MUSB_POWER);
1535 power |= MUSB_POWER_RESUME;
1536 musb_writeb(mregs, MUSB_POWER, power);
1537 DBG(2, "issue wakeup\n");
1539 /* FIXME do this next chunk in a timer callback, no udelay */
1540 mdelay(2);
1542 power = musb_readb(mregs, MUSB_POWER);
1543 power &= ~MUSB_POWER_RESUME;
1544 musb_writeb(mregs, MUSB_POWER, power);
1545 done:
1546 spin_unlock_irqrestore(&musb->lock, flags);
1547 return status;
1550 static int
1551 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1553 struct musb *musb = gadget_to_musb(gadget);
1555 musb->is_self_powered = !!is_selfpowered;
1556 return 0;
1559 static void musb_pullup(struct musb *musb, int is_on)
1561 u8 power;
1563 power = musb_readb(musb->mregs, MUSB_POWER);
1564 if (is_on)
1565 power |= MUSB_POWER_SOFTCONN;
1566 else
1567 power &= ~MUSB_POWER_SOFTCONN;
1569 /* FIXME if on, HdrcStart; if off, HdrcStop */
1571 DBG(3, "gadget %s D+ pullup %s\n",
1572 musb->gadget_driver->function, is_on ? "on" : "off");
1573 musb_writeb(musb->mregs, MUSB_POWER, power);
1576 #if 0
1577 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1579 DBG(2, "<= %s =>\n", __func__);
1582 * FIXME iff driver's softconnect flag is set (as it is during probe,
1583 * though that can clear it), just musb_pullup().
1586 return -EINVAL;
1588 #endif
1590 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1592 struct musb *musb = gadget_to_musb(gadget);
1594 if (!musb->xceiv->set_power)
1595 return -EOPNOTSUPP;
1596 return otg_set_power(musb->xceiv, mA);
1599 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1601 struct musb *musb = gadget_to_musb(gadget);
1602 unsigned long flags;
1604 is_on = !!is_on;
1606 /* NOTE: this assumes we are sensing vbus; we'd rather
1607 * not pullup unless the B-session is active.
1609 spin_lock_irqsave(&musb->lock, flags);
1610 if (is_on != musb->softconnect) {
1611 musb->softconnect = is_on;
1612 musb_pullup(musb, is_on);
1614 spin_unlock_irqrestore(&musb->lock, flags);
1615 return 0;
1618 static const struct usb_gadget_ops musb_gadget_operations = {
1619 .get_frame = musb_gadget_get_frame,
1620 .wakeup = musb_gadget_wakeup,
1621 .set_selfpowered = musb_gadget_set_self_powered,
1622 /* .vbus_session = musb_gadget_vbus_session, */
1623 .vbus_draw = musb_gadget_vbus_draw,
1624 .pullup = musb_gadget_pullup,
1627 /* ----------------------------------------------------------------------- */
1629 /* Registration */
1631 /* Only this registration code "knows" the rule (from USB standards)
1632 * about there being only one external upstream port. It assumes
1633 * all peripheral ports are external...
1635 static struct musb *the_gadget;
1637 static void musb_gadget_release(struct device *dev)
1639 /* kref_put(WHAT) */
1640 dev_dbg(dev, "%s\n", __func__);
1644 static void __init
1645 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1647 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1649 memset(ep, 0, sizeof *ep);
1651 ep->current_epnum = epnum;
1652 ep->musb = musb;
1653 ep->hw_ep = hw_ep;
1654 ep->is_in = is_in;
1656 INIT_LIST_HEAD(&ep->req_list);
1658 sprintf(ep->name, "ep%d%s", epnum,
1659 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1660 is_in ? "in" : "out"));
1661 ep->end_point.name = ep->name;
1662 INIT_LIST_HEAD(&ep->end_point.ep_list);
1663 if (!epnum) {
1664 ep->end_point.maxpacket = 64;
1665 ep->end_point.ops = &musb_g_ep0_ops;
1666 musb->g.ep0 = &ep->end_point;
1667 } else {
1668 if (is_in)
1669 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1670 else
1671 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1672 ep->end_point.ops = &musb_ep_ops;
1673 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1678 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1679 * to the rest of the driver state.
1681 static inline void __init musb_g_init_endpoints(struct musb *musb)
1683 u8 epnum;
1684 struct musb_hw_ep *hw_ep;
1685 unsigned count = 0;
1687 /* initialize endpoint list just once */
1688 INIT_LIST_HEAD(&(musb->g.ep_list));
1690 for (epnum = 0, hw_ep = musb->endpoints;
1691 epnum < musb->nr_endpoints;
1692 epnum++, hw_ep++) {
1693 if (hw_ep->is_shared_fifo /* || !epnum */) {
1694 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1695 count++;
1696 } else {
1697 if (hw_ep->max_packet_sz_tx) {
1698 init_peripheral_ep(musb, &hw_ep->ep_in,
1699 epnum, 1);
1700 count++;
1702 if (hw_ep->max_packet_sz_rx) {
1703 init_peripheral_ep(musb, &hw_ep->ep_out,
1704 epnum, 0);
1705 count++;
1711 /* called once during driver setup to initialize and link into
1712 * the driver model; memory is zeroed.
1714 int __init musb_gadget_setup(struct musb *musb)
1716 int status;
1718 /* REVISIT minor race: if (erroneously) setting up two
1719 * musb peripherals at the same time, only the bus lock
1720 * is probably held.
1722 if (the_gadget)
1723 return -EBUSY;
1724 the_gadget = musb;
1726 musb->g.ops = &musb_gadget_operations;
1727 musb->g.is_dualspeed = 1;
1728 musb->g.speed = USB_SPEED_UNKNOWN;
1730 /* this "gadget" abstracts/virtualizes the controller */
1731 dev_set_name(&musb->g.dev, "gadget");
1732 musb->g.dev.parent = musb->controller;
1733 musb->g.dev.dma_mask = musb->controller->dma_mask;
1734 musb->g.dev.release = musb_gadget_release;
1735 musb->g.name = musb_driver_name;
1737 if (is_otg_enabled(musb))
1738 musb->g.is_otg = 1;
1740 musb_g_init_endpoints(musb);
1742 musb->is_active = 0;
1743 musb_platform_try_idle(musb, 0);
1745 status = device_register(&musb->g.dev);
1746 if (status != 0) {
1747 put_device(&musb->g.dev);
1748 the_gadget = NULL;
1750 return status;
1753 void musb_gadget_cleanup(struct musb *musb)
1755 if (musb != the_gadget)
1756 return;
1758 device_unregister(&musb->g.dev);
1759 the_gadget = NULL;
1763 * Register the gadget driver. Used by gadget drivers when
1764 * registering themselves with the controller.
1766 * -EINVAL something went wrong (not driver)
1767 * -EBUSY another gadget is already using the controller
1768 * -ENOMEM no memory to perform the operation
1770 * @param driver the gadget driver
1771 * @param bind the driver's bind function
1772 * @return <0 if error, 0 if everything is fine
1774 int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
1775 int (*bind)(struct usb_gadget *))
1777 int retval;
1778 unsigned long flags;
1779 struct musb *musb = the_gadget;
1781 if (!driver
1782 || driver->speed != USB_SPEED_HIGH
1783 || !bind || !driver->setup)
1784 return -EINVAL;
1786 /* driver must be initialized to support peripheral mode */
1787 if (!musb) {
1788 DBG(1, "%s, no dev??\n", __func__);
1789 return -ENODEV;
1792 DBG(3, "registering driver %s\n", driver->function);
1793 spin_lock_irqsave(&musb->lock, flags);
1795 if (musb->gadget_driver) {
1796 DBG(1, "%s is already bound to %s\n",
1797 musb_driver_name,
1798 musb->gadget_driver->driver.name);
1799 retval = -EBUSY;
1800 } else {
1801 musb->gadget_driver = driver;
1802 musb->g.dev.driver = &driver->driver;
1803 driver->driver.bus = NULL;
1804 musb->softconnect = 1;
1805 retval = 0;
1808 spin_unlock_irqrestore(&musb->lock, flags);
1810 if (retval == 0) {
1811 retval = bind(&musb->g);
1812 if (retval != 0) {
1813 DBG(3, "bind to driver %s failed --> %d\n",
1814 driver->driver.name, retval);
1815 musb->gadget_driver = NULL;
1816 musb->g.dev.driver = NULL;
1819 spin_lock_irqsave(&musb->lock, flags);
1821 otg_set_peripheral(musb->xceiv, &musb->g);
1822 musb->xceiv->state = OTG_STATE_B_IDLE;
1823 musb->is_active = 1;
1825 /* FIXME this ignores the softconnect flag. Drivers are
1826 * allowed to hold the peripheral inactive until, for example,
1827 * userspace hooks up printer hardware or DSP codecs, so
1828 * hosts only see fully functional devices.
1831 if (!is_otg_enabled(musb))
1832 musb_start(musb);
1834 otg_set_peripheral(musb->xceiv, &musb->g);
1836 spin_unlock_irqrestore(&musb->lock, flags);
1838 if (is_otg_enabled(musb)) {
1839 struct usb_hcd *hcd = musb_to_hcd(musb);
1841 DBG(3, "OTG startup...\n");
1843 /* REVISIT: funcall to other code, which also
1844 * handles power budgeting ... this way also
1845 * ensures HdrcStart is indirectly called.
1847 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1848 if (retval < 0) {
1849 DBG(1, "add_hcd failed, %d\n", retval);
1850 spin_lock_irqsave(&musb->lock, flags);
1851 otg_set_peripheral(musb->xceiv, NULL);
1852 musb->gadget_driver = NULL;
1853 musb->g.dev.driver = NULL;
1854 spin_unlock_irqrestore(&musb->lock, flags);
1855 } else {
1856 hcd->self.uses_pio_for_control = 1;
1861 return retval;
1863 EXPORT_SYMBOL(usb_gadget_probe_driver);
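/*
 * Illustrative sketch, not part of this driver: a minimal gadget driver
 * registering itself with this controller through
 * usb_gadget_probe_driver().  my_driver, my_bind, my_setup, my_unbind and
 * my_disconnect are hypothetical placeholders.
 */
#if 0
static struct usb_gadget_driver my_driver = {
	.function	= "my gadget",
	.speed		= USB_SPEED_HIGH,	/* musb rejects anything else */
	.setup		= my_setup,
	.disconnect	= my_disconnect,
	.unbind		= my_unbind,
	.driver		= {
		.name	= "my_gadget",
		.owner	= THIS_MODULE,
	},
};

static int __init my_init(void)
{
	/* binds my_bind() to the one musb peripheral instance */
	return usb_gadget_probe_driver(&my_driver, my_bind);
}
module_init(my_init);
#endif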
1865 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1867 int i;
1868 struct musb_hw_ep *hw_ep;
1870 /* don't disconnect if it's not connected */
1871 if (musb->g.speed == USB_SPEED_UNKNOWN)
1872 driver = NULL;
1873 else
1874 musb->g.speed = USB_SPEED_UNKNOWN;
1876 /* deactivate the hardware */
1877 if (musb->softconnect) {
1878 musb->softconnect = 0;
1879 musb_pullup(musb, 0);
1881 musb_stop(musb);
1883 /* killing any outstanding requests will quiesce the driver;
1884 * then report disconnect
1886 if (driver) {
1887 for (i = 0, hw_ep = musb->endpoints;
1888 i < musb->nr_endpoints;
1889 i++, hw_ep++) {
1890 musb_ep_select(musb->mregs, i);
1891 if (hw_ep->is_shared_fifo /* || !epnum */) {
1892 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1893 } else {
1894 if (hw_ep->max_packet_sz_tx)
1895 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1896 if (hw_ep->max_packet_sz_rx)
1897 nuke(&hw_ep->ep_out, -ESHUTDOWN);
1901 spin_unlock(&musb->lock);
1902 driver->disconnect(&musb->g);
1903 spin_lock(&musb->lock);
1908 * Unregister the gadget driver. Used by gadget drivers when
1909 * unregistering themselves from the controller.
1911 * @param driver the gadget driver to unregister
1913 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1915 unsigned long flags;
1916 int retval = 0;
1917 struct musb *musb = the_gadget;
1919 if (!driver || !driver->unbind || !musb)
1920 return -EINVAL;
1922 /* REVISIT always use otg_set_peripheral() here too;
1923 * this needs to shut down the OTG engine.
1926 spin_lock_irqsave(&musb->lock, flags);
1928 #ifdef CONFIG_USB_MUSB_OTG
1929 musb_hnp_stop(musb);
1930 #endif
1932 if (musb->gadget_driver == driver) {
1934 (void) musb_gadget_vbus_draw(&musb->g, 0);
1936 musb->xceiv->state = OTG_STATE_UNDEFINED;
1937 stop_activity(musb, driver);
1938 otg_set_peripheral(musb->xceiv, NULL);
1940 DBG(3, "unregistering driver %s\n", driver->function);
1941 spin_unlock_irqrestore(&musb->lock, flags);
1942 driver->unbind(&musb->g);
1943 spin_lock_irqsave(&musb->lock, flags);
1945 musb->gadget_driver = NULL;
1946 musb->g.dev.driver = NULL;
1948 musb->is_active = 0;
1949 musb_platform_try_idle(musb, 0);
1950 } else
1951 retval = -EINVAL;
1952 spin_unlock_irqrestore(&musb->lock, flags);
1954 if (is_otg_enabled(musb) && retval == 0) {
1955 usb_remove_hcd(musb_to_hcd(musb));
1956 /* FIXME we need to be able to register another
1957 * gadget driver here and have everything work;
1958 * that currently misbehaves.
1962 return retval;
1964 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1967 /* ----------------------------------------------------------------------- */
1969 /* lifecycle operations called through plat_uds.c */
1971 void musb_g_resume(struct musb *musb)
1973 musb->is_suspended = 0;
1974 switch (musb->xceiv->state) {
1975 case OTG_STATE_B_IDLE:
1976 break;
1977 case OTG_STATE_B_WAIT_ACON:
1978 case OTG_STATE_B_PERIPHERAL:
1979 musb->is_active = 1;
1980 if (musb->gadget_driver && musb->gadget_driver->resume) {
1981 spin_unlock(&musb->lock);
1982 musb->gadget_driver->resume(&musb->g);
1983 spin_lock(&musb->lock);
1985 break;
1986 default:
1987 WARNING("unhandled RESUME transition (%s)\n",
1988 otg_state_string(musb));
1992 /* called when SOF packets stop for 3+ msec */
1993 void musb_g_suspend(struct musb *musb)
1995 u8 devctl;
1997 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1998 DBG(3, "devctl %02x\n", devctl);
2000 switch (musb->xceiv->state) {
2001 case OTG_STATE_B_IDLE:
2002 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2003 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2004 break;
2005 case OTG_STATE_B_PERIPHERAL:
2006 musb->is_suspended = 1;
2007 if (musb->gadget_driver && musb->gadget_driver->suspend) {
2008 spin_unlock(&musb->lock);
2009 musb->gadget_driver->suspend(&musb->g);
2010 spin_lock(&musb->lock);
2012 break;
2013 default:
2014 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2015 * A_PERIPHERAL may need care too
2017 WARNING("unhandled SUSPEND transition (%s)\n",
2018 otg_state_string(musb));
2022 /* Called during SRP */
2023 void musb_g_wakeup(struct musb *musb)
2025 musb_gadget_wakeup(&musb->g);
2028 /* called when VBUS drops below session threshold, and in other cases */
2029 void musb_g_disconnect(struct musb *musb)
2031 void __iomem *mregs = musb->mregs;
2032 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
2034 DBG(3, "devctl %02x\n", devctl);
2036 /* clear HR */
2037 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2039 /* don't draw vbus until new b-default session */
2040 (void) musb_gadget_vbus_draw(&musb->g, 0);
2042 musb->g.speed = USB_SPEED_UNKNOWN;
2043 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2044 spin_unlock(&musb->lock);
2045 musb->gadget_driver->disconnect(&musb->g);
2046 spin_lock(&musb->lock);
2049 switch (musb->xceiv->state) {
2050 default:
2051 #ifdef CONFIG_USB_MUSB_OTG
2052 DBG(2, "Unhandled disconnect %s, setting a_idle\n",
2053 otg_state_string(musb));
2054 musb->xceiv->state = OTG_STATE_A_IDLE;
2055 MUSB_HST_MODE(musb);
2056 break;
2057 case OTG_STATE_A_PERIPHERAL:
2058 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2059 MUSB_HST_MODE(musb);
2060 break;
2061 case OTG_STATE_B_WAIT_ACON:
2062 case OTG_STATE_B_HOST:
2063 #endif
2064 case OTG_STATE_B_PERIPHERAL:
2065 case OTG_STATE_B_IDLE:
2066 musb->xceiv->state = OTG_STATE_B_IDLE;
2067 break;
2068 case OTG_STATE_B_SRP_INIT:
2069 break;
2072 musb->is_active = 0;
2075 void musb_g_reset(struct musb *musb)
2076 __releases(musb->lock)
2077 __acquires(musb->lock)
2079 void __iomem *mbase = musb->mregs;
2080 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
2081 u8 power;
2083 DBG(3, "<== %s addr=%x driver '%s'\n",
2084 (devctl & MUSB_DEVCTL_BDEVICE)
2085 ? "B-Device" : "A-Device",
2086 musb_readb(mbase, MUSB_FADDR),
2087 musb->gadget_driver
2088 ? musb->gadget_driver->driver.name
2089 : NULL
2092 /* report disconnect, if we didn't already (flushing EP state) */
2093 if (musb->g.speed != USB_SPEED_UNKNOWN)
2094 musb_g_disconnect(musb);
2096 /* clear HR */
2097 else if (devctl & MUSB_DEVCTL_HR)
2098 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2101 /* what speed did we negotiate? */
2102 power = musb_readb(mbase, MUSB_POWER);
2103 musb->g.speed = (power & MUSB_POWER_HSMODE)
2104 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2106 /* start in USB_STATE_DEFAULT */
2107 musb->is_active = 1;
2108 musb->is_suspended = 0;
2109 MUSB_DEV_MODE(musb);
2110 musb->address = 0;
2111 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2113 musb->may_wakeup = 0;
2114 musb->g.b_hnp_enable = 0;
2115 musb->g.a_alt_hnp_support = 0;
2116 musb->g.a_hnp_support = 0;
2118 /* Normal reset, as B-Device;
2119 * or else after HNP, as A-Device
2121 if (devctl & MUSB_DEVCTL_BDEVICE) {
2122 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2123 musb->g.is_a_peripheral = 0;
2124 } else if (is_otg_enabled(musb)) {
2125 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2126 musb->g.is_a_peripheral = 1;
2127 } else
2128 WARN_ON(1);
2130 /* start with default limits on VBUS power draw */
2131 (void) musb_gadget_vbus_draw(&musb->g,
2132 is_otg_enabled(musb) ? 8 : 100);