/*
 * MUSB OTG driver core code
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
 *
 * This consists of a Host Controller Driver (HCD) and a peripheral
 * controller driver implementing the "Gadget" API; OTG support is
 * in the works.  These are normal Linux-USB controller drivers which
 * use IRQs and have no dedicated thread.
 *
 * This version of the driver has only been used with products from
 * Texas Instruments.  Those products integrate the Inventra logic
 * with other DMA, IRQ, and bus modules, as well as other logic that
 * needs to be reflected in this driver.
 *
 *
 * NOTE:  the original Mentor code here was pretty much a collection
 * of mechanisms that don't seem to have been fully integrated/working
 * for *any* Linux kernel version.  This version aims at Linux 2.6.now.
 * Key open issues include:
 *
 *  - Lack of host-side transaction scheduling, for all transfer types.
 *    The hardware doesn't do it; instead, software must.
 *
 *    This is not an issue for OTG devices that don't support external
 *    hubs, but for more "normal" USB hosts it's a user issue that the
 *    "multipoint" support doesn't scale in the expected ways.  That
 *    includes DaVinci EVM in a common non-OTG mode.
 *
 *      * Control and bulk use dedicated endpoints, and there's as
 *        yet no mechanism to either (a) reclaim the hardware when
 *        peripherals are NAKing, which gets complicated with bulk
 *        endpoints, or (b) use more than a single bulk endpoint in
 *        each direction.
 *
 *        RESULT:  one device may be perceived as blocking another one.
 *
 *      * Interrupt and isochronous will dynamically allocate endpoint
 *        hardware, but (a) there's no record keeping for bandwidth;
 *        (b) in the common case that few endpoints are available, there
 *        is no mechanism to reuse endpoints to talk to multiple devices.
 *
 *        RESULT:  At one extreme, bandwidth can be overcommitted in
 *        some hardware configurations, no faults will be reported.
 *        At the other extreme, the bandwidth capabilities which do
 *        exist tend to be severely undercommitted.  You can't yet hook
 *        up both a keyboard and a mouse to an external USB hub.
 */

/*
 * This gets many kinds of configuration information:
 *	- Kconfig for everything user-configurable
 *	- platform_device for addressing, irq, and platform_data
 *	- platform_data is mostly for board-specific information
 *	  (plus recently, SOC or family details)
 *
 * Most of the conditional compilation will (someday) vanish.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_trace.h"
#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)

#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"

#define MUSB_VERSION "6.0"

#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION

#define MUSB_DRIVER_NAME "musb-hdrc"
const char musb_driver_name[] = MUSB_DRIVER_NAME;

MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
/*-------------------------------------------------------------------------*/

static inline struct musb *dev_to_musb(struct device *dev)
{
	return dev_get_drvdata(dev);
}

/*-------------------------------------------------------------------------*/
#ifndef CONFIG_BLACKFIN
static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r;
	u8	power;
	int	ret;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
	 */

	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);

	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}

static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r;
	u8	power;
	int	ret = 0;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);

	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}
#else
#define musb_ulpi_read		NULL
#define musb_ulpi_write		NULL
#endif

static struct usb_phy_io_ops musb_ulpi_access = {
	.read = musb_ulpi_read,
	.write = musb_ulpi_write,
};
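/*
 * Summary of the ULPI accessors above: a PHY register access is driven
 * entirely through MUSB registers.  The register address goes into
 * MUSB_ULPI_REG_ADDR (plus the value into MUSB_ULPI_REG_DATA for writes),
 * the request is started via MUSB_ULPI_REG_CONTROL, software busy-waits
 * for MUSB_ULPI_REG_CMPLT, acknowledges it by clearing that bit, and for
 * reads finally fetches the result from MUSB_ULPI_REG_DATA.  Both paths
 * first clear MUSB_POWER_SUSPENDM so the transceiver is awake, and wrap
 * the whole access in pm_runtime_get_sync()/pm_runtime_put() on the PHY's
 * I/O device.
 */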
/*-------------------------------------------------------------------------*/

static u32 musb_default_fifo_offset(u8 epnum)
{
	return 0x20 + (epnum * 4);
}

/* "flat" mapping: each endpoint has its own i/o address */
static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
{
}

static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
{
	return 0x100 + (0x10 * epnum) + offset;
}

/* "indexed" mapping: INDEX register controls register bank select */
static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
{
	musb_writeb(mbase, MUSB_INDEX, epnum);
}

static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
{
	return 0x10 + offset;
}

static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
{
	return 0x80 + (0x08 * epnum) + offset;
}

static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
{
	u8 data = __raw_readb(addr + offset);

	trace_musb_readb(__builtin_return_address(0), addr, offset, data);
	return data;
}

static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
{
	trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
	__raw_writeb(data, addr + offset);
}

static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
{
	u16 data = __raw_readw(addr + offset);

	trace_musb_readw(__builtin_return_address(0), addr, offset, data);
	return data;
}

static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
{
	trace_musb_writew(__builtin_return_address(0), addr, offset, data);
	__raw_writew(data, addr + offset);
}

static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
{
	u32 data = __raw_readl(addr + offset);

	trace_musb_readl(__builtin_return_address(0), addr, offset, data);
	return data;
}

static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
	__raw_writel(data, addr + offset);
}
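/*
 * The accessors above are only defaults.  musb_init_controller() installs
 * them first and then lets the platform glue override any of them through
 * struct musb_platform_ops, e.g. (illustrative only; the field names are
 * the ones checked by the override code in musb_init_controller()):
 *
 *	static const struct musb_platform_ops my_glue_ops = {
 *		.quirks		= MUSB_INDEXED_EP,
 *		.readb		= my_glue_readb,
 *		.writeb		= my_glue_writeb,
 *		.fifo_offset	= my_glue_fifo_offset,
 *	};
 *
 * so SoCs with unusual register layouts (tusb6010, for instance) can
 * substitute their own helpers without touching the core.
 */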
/*
 * Load an endpoint's FIFO
 */
static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
				    const u8 *src)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'T', hw_ep->epnum, fifo, len, src);

	/* we can't assume unaligned reads work */
	if (likely((0x01 & (unsigned long) src) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned source address */
		if ((0x02 & (unsigned long) src) == 0) {
			if (len >= 4) {
				iowrite32_rep(fifo, src + index, len >> 2);
				index += len & ~0x03;
			}
			if (len & 0x02) {
				__raw_writew(*(u16 *)&src[index], fifo);
				index += 2;
			}
		} else {
			if (len >= 2) {
				iowrite16_rep(fifo, src + index, len >> 1);
				index += len & ~0x01;
			}
		}
		if (len & 0x01)
			__raw_writeb(src[index], fifo);
	} else {
		/* byte aligned */
		iowrite8_rep(fifo, src, len);
	}
}

/*
 * Unload an endpoint's FIFO
 */
static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'R', hw_ep->epnum, fifo, len, dst);

	/* we can't assume unaligned writes work */
	if (likely((0x01 & (unsigned long) dst) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) dst) == 0) {
			if (len >= 4) {
				ioread32_rep(fifo, dst, len >> 2);
				index = len & ~0x03;
			}
			if (len & 0x02) {
				*(u16 *)&dst[index] = __raw_readw(fifo);
				index += 2;
			}
		} else {
			if (len >= 2) {
				ioread16_rep(fifo, dst, len >> 1);
				index = len & ~0x01;
			}
		}
		if (len & 0x01)
			dst[index] = __raw_readb(fifo);
	} else {
		/* byte aligned */
		ioread8_rep(fifo, dst, len);
	}
}
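/*
 * Both PIO helpers above use the same alignment strategy: if the buffer is
 * at least 16-bit aligned, they move as much data as possible with 32-bit
 * (or, failing that, 16-bit) repeated MMIO accesses and mop up the odd
 * trailing bytes individually; a completely unaligned buffer falls back to
 * byte-at-a-time transfers, since unaligned multi-byte FIFO accesses can't
 * be assumed to work.
 */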
/*
 * Old style IO functions
 */
u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readb);

void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);

u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readw);

void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);

u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readl);

void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
EXPORT_SYMBOL_GPL(musb_writel);

#ifndef CONFIG_MUSB_PIO_ONLY
struct dma_controller *
(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
EXPORT_SYMBOL(musb_dma_controller_create);

void (*musb_dma_controller_destroy)(struct dma_controller *c);
EXPORT_SYMBOL(musb_dma_controller_destroy);
#endif
/*
 * New style IO functions
 */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
}

void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
	return hw_ep->musb->io.write_fifo(hw_ep, len, src);
}

/*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 musb_test_packet[53] = {
	/* implicit SYNC then DATA0 to start */

	/* JKJKJKJK x9 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* JJKKJJKK x8 */
	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
	/* JJJJKKKK x8 */
	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
	/* JJJJJJJKKKKKKK x8 */
	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	/* JJJJJJJK x8 */
	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
	/* JKKKKKKK x10, JK */
	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e

	/* implicit CRC16 then EOP to end */
};

void musb_load_testpacket(struct musb *musb)
{
	void __iomem	*regs = musb->endpoints[0].regs;

	musb_ep_select(musb->mregs, 0);
	musb_write_fifo(musb->control_ep,
			sizeof(musb_test_packet), musb_test_packet);
	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
}
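/*
 * musb_load_testpacket() only primes EP0: it selects endpoint 0, copies the
 * 53-byte pattern above into the FIFO, and sets MUSB_CSR0_TXPKTRDY.
 * Actually entering the TEST_PACKET high-speed test mode is a separate step
 * done through the MUSB_TESTMODE register (which musb_start() clears at
 * startup); the continuous retransmission required by USB 2.0 section
 * 7.1.20 is then expected to be handled by the hardware.
 */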
/*-------------------------------------------------------------------------*/

/*
 * Handles OTG hnp timeouts, such as b_ase0_brst
 */
static void musb_otg_timer_func(unsigned long data)
{
	struct musb	*musb = (struct musb *)data;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_WAIT_ACON:
		musb_dbg(musb,
			"HNP: b_wait_acon timeout; back to b_peripheral");
		musb_g_disconnect(musb);
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_A_SUSPEND:
	case OTG_STATE_A_WAIT_BCON:
		musb_dbg(musb, "HNP: %s timeout",
			usb_otg_state_string(musb->xceiv->otg->state));
		musb_platform_set_vbus(musb, 0);
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
		break;
	default:
		musb_dbg(musb, "HNP: Unhandled mode %s",
			usb_otg_state_string(musb->xceiv->otg->state));
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Stops the HNP transition. Caller must take care of locking.
 */
void musb_hnp_stop(struct musb *musb)
{
	struct usb_hcd	*hcd = musb->hcd;
	void __iomem	*mbase = musb->mregs;
	u8	reg;

	musb_dbg(musb, "HNP: stop from %s",
			usb_otg_state_string(musb->xceiv->otg->state));

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_PERIPHERAL:
		musb_g_disconnect(musb);
		musb_dbg(musb, "HNP: back to %s",
			usb_otg_state_string(musb->xceiv->otg->state));
		break;
	case OTG_STATE_B_HOST:
		musb_dbg(musb, "HNP: Disabling HR");
		if (hcd)
			hcd->self.is_b_host = 0;
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		reg = musb_readb(mbase, MUSB_POWER);
		reg |= MUSB_POWER_SUSPENDM;
		musb_writeb(mbase, MUSB_POWER, reg);
		/* REVISIT: Start SESSION_REQUEST here? */
		break;
	default:
		musb_dbg(musb, "HNP: Stopping in unknown state %s",
			usb_otg_state_string(musb->xceiv->otg->state));
	}

	/*
	 * When returning to A state after HNP, avoid hub_port_rebounce(),
	 * which cause occasional OPT A "Did not receive reset after connect"
	 * errors.
	 */
	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}

static void musb_recover_from_babble(struct musb *musb);
/*
 * Interrupt Service Routine to record USB "global" interrupts.
 * Since these do not happen often and signify things of
 * paramount importance, it seems OK to check them individually;
 * the order of the tests is specified in the manual
 *
 * @param musb instance pointer
 * @param int_usb register contents
 * @param devctl register contents
 */

static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
				u8 devctl)
{
	irqreturn_t handled = IRQ_NONE;

	musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
	/* in host mode, the peripheral may issue remote wakeup.
	 * in peripheral mode, the host may resume the link.
	 * spurious RESUME irqs happen too, paired with SUSPEND.
	 */
	if (int_usb & MUSB_INTR_RESUME) {
		handled = IRQ_HANDLED;
		musb_dbg(musb, "RESUME (%s)",
				usb_otg_state_string(musb->xceiv->otg->state));

		if (devctl & MUSB_DEVCTL_HM) {
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* remote wakeup? later, GetPortStatus
				 * will stop RESUME signaling
				 */

				musb->port1_status |=
						(USB_PORT_STAT_C_SUSPEND << 16)
						| MUSB_PORT_STAT_RESUME;
				musb->rh_timer = jiffies
					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
				musb->need_finish_resume = 1;

				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb_host_resume_root_hub(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"host",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		} else {
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* possibly DISCONNECT is upcoming */
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb_host_resume_root_hub(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
			case OTG_STATE_B_PERIPHERAL:
				/* disconnect while suspended?  we may
				 * not get a disconnect irq...
				 */
				if ((devctl & MUSB_DEVCTL_VBUS)
						!= (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
					musb->int_usb |= MUSB_INTR_DISCONNECT;
					musb->int_usb &= ~MUSB_INTR_SUSPEND;
					break;
				}
				musb_g_resume(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->int_usb &= ~MUSB_INTR_SUSPEND;
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"peripheral",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}
	/* see manual for the order of the tests */
	if (int_usb & MUSB_INTR_SESSREQ) {
		void __iomem *mbase = musb->mregs;

		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
			musb_dbg(musb, "SessReq while on B state");
			return IRQ_HANDLED;
		}

		musb_dbg(musb, "SESSION_REQUEST (%s)",
			usb_otg_state_string(musb->xceiv->otg->state));

		/* IRQ arrives from ID pin sense or (later, if VBUS power
		 * is removed) SRP.  responses are time critical:
		 *  - turn on VBUS (with silicon-specific mechanism)
		 *  - go through A_WAIT_VRISE
		 *  - ... to A_WAIT_BCON.
		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
		 */
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
		musb->ep0_stage = MUSB_EP0_START;
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
		musb_platform_set_vbus(musb, 1);

		handled = IRQ_HANDLED;
	}
	if (int_usb & MUSB_INTR_VBUSERROR) {
		int	ignore = 0;

		/* During connection as an A-Device, we may see short
		 * current spikes causing voltage drops, because of cable
		 * and peripheral capacitance combined with vbus draw.
		 * (So: less common with truly self-powered devices, where
		 * vbus doesn't act like a power supply.)
		 *
		 * Such spikes are short; usually less than ~500 usec, max
		 * of ~2 msec.  That is, they're not sustained overcurrent
		 * errors, though they're reported using VBUSERROR irqs.
		 *
		 * Workarounds:  (a) hardware: use self powered devices.
		 * (b) software:  ignore non-repeated VBUS errors.
		 *
		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
		 * make trouble here, keeping VBUS < 4.4V ?
		 */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
			/* recovery is dicey once we've gotten past the
			 * initial stages of enumeration, but if VBUS
			 * stayed ok at the other end of the link, and
			 * another reset is due (at least for high speed,
			 * to redo the chirp etc), it might work OK...
			 */
		case OTG_STATE_A_WAIT_BCON:
		case OTG_STATE_A_WAIT_VRISE:
			if (musb->vbuserr_retry) {
				void __iomem *mbase = musb->mregs;

				musb->vbuserr_retry--;
				ignore = 1;
				devctl |= MUSB_DEVCTL_SESSION;
				musb_writeb(mbase, MUSB_DEVCTL, devctl);
			} else {
				musb->port1_status |=
					  USB_PORT_STAT_OVERCURRENT
					| (USB_PORT_STAT_C_OVERCURRENT << 16);
			}
			break;
		default:
			break;
		}

		dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
				"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
				usb_otg_state_string(musb->xceiv->otg->state),
				devctl,
				({ char *s;
				switch (devctl & MUSB_DEVCTL_VBUS) {
				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<SessEnd"; break;
				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<AValid"; break;
				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<VBusValid"; break;
				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
				default:
					s = "VALID"; break;
				} s; }),
				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
				musb->port1_status);

		/* go through A_WAIT_VFALL then start a new session */
		if (!ignore)
			musb_platform_set_vbus(musb, 0);
		handled = IRQ_HANDLED;
	}
	if (int_usb & MUSB_INTR_SUSPEND) {
		musb_dbg(musb, "SUSPEND (%s) devctl %02x",
			usb_otg_state_string(musb->xceiv->otg->state), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_PERIPHERAL:
			/* We also come here if the cable is removed, since
			 * this silicon doesn't report ID-no-longer-grounded.
			 *
			 * We depend on T(a_wait_bcon) to shut us down, and
			 * hope users don't do anything dicey during this
			 * undesired detour through A_WAIT_BCON.
			 */
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon
						? : OTG_TIME_A_WAIT_BCON));
			break;
		case OTG_STATE_B_IDLE:
			if (!musb->is_active)
				break;
		case OTG_STATE_B_PERIPHERAL:
			musb_g_suspend(musb);
			musb->is_active = musb->g.b_hnp_enable;
			if (musb->is_active) {
				musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
				musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(
							OTG_TIME_B_ASE0_BRST));
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_A_HOST:
			musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
			musb->is_active = musb->hcd->self.b_hnp_enable;
			break;
		case OTG_STATE_B_HOST:
			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
			musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
			break;
		default:
			/* "should not happen" */
			break;
		}
	}
	if (int_usb & MUSB_INTR_CONNECT) {
		struct usb_hcd *hcd = musb->hcd;

		handled = IRQ_HANDLED;
		musb->is_active = 1;

		musb->ep0_stage = MUSB_EP0_START;

		musb->intrtxe = musb->epmask;
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb->intrrxe = musb->epmask & 0xfffe;
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
					|USB_PORT_STAT_HIGH_SPEED
					|USB_PORT_STAT_ENABLE
					);
		musb->port1_status |= USB_PORT_STAT_CONNECTION
					|(USB_PORT_STAT_C_CONNECTION << 16);

		/* high vs full speed is just a guess until after reset */
		if (devctl & MUSB_DEVCTL_LSDEV)
			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;

		/* indicate new connection to OTG machine */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_B_PERIPHERAL:
			if (int_usb & MUSB_INTR_SUSPEND) {
				musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
				int_usb &= ~MUSB_INTR_SUSPEND;
				goto b_host;
			} else
				musb_dbg(musb, "CONNECT as b_peripheral???");
			break;
		case OTG_STATE_B_WAIT_ACON:
			musb_dbg(musb, "HNP: CONNECT, now b_host");
b_host:
			musb->xceiv->otg->state = OTG_STATE_B_HOST;
			if (musb->hcd)
				musb->hcd->self.is_b_host = 1;
			del_timer(&musb->otg_timer);
			break;
		default:
			if ((devctl & MUSB_DEVCTL_VBUS)
					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				if (hcd)
					hcd->self.is_b_host = 0;
			}
			break;
		}

		musb_host_poke_root_hub(musb);

		musb_dbg(musb, "CONNECT (%s) devctl %02x",
				usb_otg_state_string(musb->xceiv->otg->state), devctl);
	}
	if (int_usb & MUSB_INTR_DISCONNECT) {
		musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
				usb_otg_state_string(musb->xceiv->otg->state),
				MUSB_MODE(musb), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
		case OTG_STATE_A_SUSPEND:
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_B_HOST:
			/* REVISIT this behaves for "real disconnect"
			 * cases; make sure the other transitions from
			 * from B_HOST act right too.  The B_HOST code
			 * in hnp_stop() is currently not used...
			 */
			musb_root_disconnect(musb);
			if (musb->hcd)
				musb->hcd->self.is_b_host = 0;
			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
			musb_g_disconnect(musb);
			break;
		case OTG_STATE_A_PERIPHERAL:
			musb_root_disconnect(musb);
			/* FALLTHROUGH */
		case OTG_STATE_B_WAIT_ACON:
			/* FALLTHROUGH */
		case OTG_STATE_B_PERIPHERAL:
		case OTG_STATE_B_IDLE:
			musb_g_disconnect(musb);
			break;
		default:
			WARNING("unhandled DISCONNECT transition (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));
			break;
		}
	}
	/* mentor saves a bit: bus reset and babble share the same irq.
	 * only host sees babble; only peripheral sees bus reset.
	 */
	if (int_usb & MUSB_INTR_RESET) {
		handled = IRQ_HANDLED;
		if (devctl & MUSB_DEVCTL_HM) {
			/*
			 * When BABBLE happens what we can depends on which
			 * platform MUSB is running, because some platforms
			 * implemented proprietary means for 'recovering' from
			 * Babble conditions. One such platform is AM335x. In
			 * most cases, however, the only thing we can do is
			 * drop the session.
			 */
			dev_err(musb->controller, "Babble\n");

			if (is_host_active(musb))
				musb_recover_from_babble(musb);
		} else {
			musb_dbg(musb, "BUS RESET as %s",
				usb_otg_state_string(musb->xceiv->otg->state));
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				musb_g_reset(musb);
				/* FALLTHROUGH */
			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
				/* never use invalid T(a_wait_bcon) */
				musb_dbg(musb, "HNP: in %s, %d msec timeout",
					usb_otg_state_string(musb->xceiv->otg->state),
					TA_WAIT_BCON(musb));
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
				break;
			case OTG_STATE_A_PERIPHERAL:
				del_timer(&musb->otg_timer);
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
					usb_otg_state_string(musb->xceiv->otg->state));
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				/* FALLTHROUGH */
			case OTG_STATE_B_PERIPHERAL:
				musb_g_reset(musb);
				break;
			default:
				musb_dbg(musb, "Unhandled BUS RESET as %s",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}
/* REVISIT ... this would be for multiplexing periodic endpoints, or
 * supporting transfer phasing to prevent exceeding ISO bandwidth
 * limits of a given frame or microframe.
 *
 * It's not needed for peripheral side, which dedicates endpoints;
 * though it _might_ use SOF irqs for other purposes.
 *
 * And it's not currently needed for host side, which also dedicates
 * endpoints, relies on TX/RX interval registers, and isn't claimed
 * to support ISO transfers yet.
 */
	if (int_usb & MUSB_INTR_SOF) {
		void __iomem *mbase = musb->mregs;
		struct musb_hw_ep	*ep;
		u8 epnum;
		u16 frame;

		dev_dbg(musb->controller, "START_OF_FRAME\n");
		handled = IRQ_HANDLED;

		/* start any periodic Tx transfers waiting for current frame */
		frame = musb_readw(mbase, MUSB_FRAME);
		ep = musb->endpoints;
		for (epnum = 1; (epnum < musb->nr_endpoints)
				&& (musb->epmask >= (1 << epnum));
				epnum++, ep++) {
			/*
			 * FIXME handle framecounter wraps (12 bits)
			 * eliminate duplicated StartUrb logic
			 */
			if (ep->dwWaitFrame >= frame) {
				pr_debug("SOF --> periodic TX%s on %d\n",
					ep->tx_channel ? " DMA" : "",
					epnum);
				if (!ep->tx_channel)
					musb_h_tx_start(musb, epnum);
				else
					cppi_hostdma_start(musb, epnum);
			}
		}		/* end of for loop */
	}

	schedule_work(&musb->irq_work);

	return handled;
}

/*-------------------------------------------------------------------------*/
static void musb_disable_interrupts(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;
	u16	temp;

	/* disable interrupts */
	musb_writeb(mbase, MUSB_INTRUSBE, 0);
	musb->intrtxe = 0;
	musb_writew(mbase, MUSB_INTRTXE, 0);
	musb->intrrxe = 0;
	musb_writew(mbase, MUSB_INTRRXE, 0);

	/*  flush pending interrupts */
	temp = musb_readb(mbase, MUSB_INTRUSB);
	temp = musb_readw(mbase, MUSB_INTRTX);
	temp = musb_readw(mbase, MUSB_INTRRX);
}
static void musb_enable_interrupts(struct musb *musb)
{
	void __iomem	*regs = musb->mregs;

	/*  Set INT enable registers, enable interrupts */
	musb->intrtxe = musb->epmask;
	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
	musb->intrrxe = musb->epmask & 0xfffe;
	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
}
static void musb_generic_disable(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;

	musb_disable_interrupts(musb);

	/* off */
	musb_writeb(mbase, MUSB_DEVCTL, 0);
}
/*
 * Program the HDRC to start (enable interrupts, dma, etc.).
 */
void musb_start(struct musb *musb)
{
	void __iomem	*regs = musb->mregs;
	u8		devctl = musb_readb(regs, MUSB_DEVCTL);
	u8		power;

	musb_dbg(musb, "<== devctl %02x", devctl);

	musb_enable_interrupts(musb);
	musb_writeb(regs, MUSB_TESTMODE, 0);

	power = MUSB_POWER_ISOUPDATE;
	/*
	 * treating UNKNOWN as unspecified maximum speed, in which case
	 * we will default to high-speed.
	 */
	if (musb->config->maximum_speed == USB_SPEED_HIGH ||
			musb->config->maximum_speed == USB_SPEED_UNKNOWN)
		power |= MUSB_POWER_HSENAB;
	musb_writeb(regs, MUSB_POWER, power);

	musb->is_active = 0;
	devctl = musb_readb(regs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;

	/* session started after:
	 * (a) ID-grounded irq, host mode;
	 * (b) vbus present/connect IRQ, peripheral mode;
	 * (c) peripheral initiates, using SRP
	 */
	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
			musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
		musb->is_active = 1;
	} else {
		devctl |= MUSB_DEVCTL_SESSION;
	}

	musb_platform_enable(musb);
	musb_writeb(regs, MUSB_DEVCTL, devctl);
}
/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{
	/* stop IRQs, timers, ... */
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	musb_dbg(musb, "HDRC disabled");

	/* FIXME
	 *  - mark host and/or peripheral drivers unusable/inactive
	 *  - disable DMA (and enable it in HdrcStart)
	 *  - make sure we can musb_start() after musb_stop(); with
	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
	 *  - ...
	 */
	musb_platform_try_idle(musb, 0);
}

/*-------------------------------------------------------------------------*/
/*
 * The silicon either has hard-wired endpoint configurations, or else
 * "dynamic fifo" sizing.  The driver has support for both, though at this
 * writing only the dynamic sizing is very well tested.   Since we switched
 * away from compile-time hardware parameters, we can no longer rely on
 * dead code elimination to leave only the relevant one in the object file.
 *
 * We don't currently use dynamic fifo setup capability to do anything
 * more than selecting one of a bunch of predefined configurations.
 */
static ushort fifo_mode;

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");

/*
 * tables defining fifo_mode values.  define more if you like.
 * for host side, make sure both halves of ep1 are set up.
 */
/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
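/*
 * Platforms are not limited to these predefined layouts: a glue driver can
 * pass its own table through struct musb_hdrc_config, and
 * ep_config_from_table() below will use it in preference to fifo_mode.
 * An illustrative (hypothetical) board configuration could look like:
 *
 *	static struct musb_fifo_cfg my_board_fifo_cfg[] = {
 *	{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
 *	{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
 *	{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 1024, },
 *	};
 *
 *	static struct musb_hdrc_config my_board_config = {
 *		.fifo_cfg	= my_board_fifo_cfg,
 *		.fifo_cfg_size	= ARRAY_SIZE(my_board_fifo_cfg),
 *	};
 *
 * The total of all FIFO allocations (doubled for double-buffered entries)
 * must still fit in the controller's FIFO RAM, which fifo_setup() checks
 * against config->ram_bits.
 */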
/*
 * configure a fifo; for non-shared endpoints, this may be called
 * once for a tx fifo and once for an rx fifo.
 *
 * returns negative errno or offset for next fifo.
 */
static int
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
		const struct musb_fifo_cfg *cfg, u16 offset)
{
	void __iomem	*mbase = musb->mregs;
	int	size = 0;
	u16	maxpacket = cfg->maxpacket;
	u16	c_off = offset >> 3;
	u8	c_size;

	/* expect hw_ep has already been zero-initialized */

	size = ffs(max(maxpacket, (u16) 8)) - 1;
	maxpacket = 1 << size;

	c_size = size - 3;
	if (cfg->mode == BUF_DOUBLE) {
		if ((offset + (maxpacket << 1)) >
				(1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
		c_size |= MUSB_FIFOSZ_DPB;
	} else {
		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
	}

	/* configure the FIFO */
	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);

	/* EP0 reserved endpoint for control, bidirectional;
	 * EP1 reserved for bulk, two unidirectional halves.
	 */
	if (hw_ep->epnum == 1)
		musb->bulk_ep = hw_ep;
	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
	switch (cfg->style) {
	case FIFO_TX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_tx = maxpacket;
		break;
	case FIFO_RX:
		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;
		break;
	case FIFO_RXTX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;

		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
		hw_ep->max_packet_sz_tx = maxpacket;

		hw_ep->is_shared_fifo = true;
		break;
	}

	/* NOTE rx and tx endpoint irqs aren't managed separately,
	 * which happens to be ok
	 */
	musb->epmask |= (1 << hw_ep->epnum);

	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
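/*
 * A quick worked example of the encoding above: the table entries use
 * power-of-two maxpacket values, so ffs(maxpacket) - 1 recovers the log2 of
 * the size (512 -> 9, i.e. a 1 << 9 = 512 byte FIFO).  The start address is
 * programmed in units of 8 bytes (c_off = offset >> 3), and the returned
 * offset for the next FIFO advances by maxpacket, or by twice that when
 * MUSB_FIFOSZ_DPB double buffering is selected.
 */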
static struct musb_fifo_cfg ep0_cfg = {
	.style = FIFO_RXTX, .maxpacket = 64,
};
static int ep_config_from_table(struct musb *musb)
{
	const struct musb_fifo_cfg	*cfg;
	unsigned		i, n;
	int			offset;
	struct musb_hw_ep	*hw_ep = musb->endpoints;

	if (musb->config->fifo_cfg) {
		cfg = musb->config->fifo_cfg;
		n = musb->config->fifo_cfg_size;
		goto done;
	}

	switch (fifo_mode) {
	default:
		fifo_mode = 0;
		/* FALLTHROUGH */
	case 0:
		cfg = mode_0_cfg;
		n = ARRAY_SIZE(mode_0_cfg);
		break;
	case 1:
		cfg = mode_1_cfg;
		n = ARRAY_SIZE(mode_1_cfg);
		break;
	case 2:
		cfg = mode_2_cfg;
		n = ARRAY_SIZE(mode_2_cfg);
		break;
	case 3:
		cfg = mode_3_cfg;
		n = ARRAY_SIZE(mode_3_cfg);
		break;
	case 4:
		cfg = mode_4_cfg;
		n = ARRAY_SIZE(mode_4_cfg);
		break;
	case 5:
		cfg = mode_5_cfg;
		n = ARRAY_SIZE(mode_5_cfg);
		break;
	}

	pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);

done:
	offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
	/* assert(offset > 0) */

	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
	 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
	 */

	for (i = 0; i < n; i++) {
		u8	epn = cfg->hw_ep_num;

		if (epn >= musb->config->num_eps) {
			pr_debug("%s: invalid ep %d\n",
					musb_driver_name, epn);
			return -EINVAL;
		}
		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
		if (offset < 0) {
			pr_debug("%s: mem overrun, ep %d\n",
					musb_driver_name, epn);
			return offset;
		}
		epn++;
		musb->nr_endpoints = max(epn, musb->nr_endpoints);
	}

	pr_debug("%s: %d/%d max ep, %d/%d memory\n",
			musb_driver_name,
			n + 1, musb->config->num_eps * 2 - 1,
			offset, (1 << (musb->config->ram_bits + 2)));

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
/*
 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
 * @param musb the controller
 */
static int ep_config_from_hw(struct musb *musb)
{
	u8 epnum = 0;
	struct musb_hw_ep *hw_ep;
	void __iomem *mbase = musb->mregs;
	int ret = 0;

	musb_dbg(musb, "<== static silicon ep config");

	/* FIXME pick up ep0 maxpacket size */

	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
		musb_ep_select(mbase, epnum);
		hw_ep = musb->endpoints + epnum;

		ret = musb_read_fifosize(musb, hw_ep, epnum);
		if (ret < 0)
			break;

		/* FIXME set up hw_ep->{rx,tx}_double_buffered */

		/* pick an RX/TX endpoint for bulk */
		if (hw_ep->max_packet_sz_tx < 512
				|| hw_ep->max_packet_sz_rx < 512)
			continue;

		/* REVISIT:  this algorithm is lazy, we should at least
		 * try to pick a double buffered endpoint.
		 */
		if (musb->bulk_ep)
			continue;
		musb->bulk_ep = hw_ep;
	}

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };

/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
 * configure endpoints, or take their config from silicon
 */
static int musb_core_init(u16 musb_type, struct musb *musb)
{
	u8 reg;
	char *type;
	char aInfo[90], aRevision[32], aDate[12];
	void __iomem	*mbase = musb->mregs;
	int		status = 0;
	int		i;

	/* log core options (read using indexed model) */
	reg = musb_read_configdata(mbase);

	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
		strcat(aInfo, ", dyn FIFOs");
		musb->dyn_fifo = true;
	}
	if (reg & MUSB_CONFIGDATA_MPRXE) {
		strcat(aInfo, ", bulk combine");
		musb->bulk_combine = true;
	}
	if (reg & MUSB_CONFIGDATA_MPTXE) {
		strcat(aInfo, ", bulk split");
		musb->bulk_split = true;
	}
	if (reg & MUSB_CONFIGDATA_HBRXE) {
		strcat(aInfo, ", HB-ISO Rx");
		musb->hb_iso_rx = true;
	}
	if (reg & MUSB_CONFIGDATA_HBTXE) {
		strcat(aInfo, ", HB-ISO Tx");
		musb->hb_iso_tx = true;
	}
	if (reg & MUSB_CONFIGDATA_SOFTCONE)
		strcat(aInfo, ", SoftConn");

	pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);

	if (MUSB_CONTROLLER_MHDRC == musb_type) {
		musb->is_multipoint = 1;
		type = "M";
	} else {
		musb->is_multipoint = 0;
		type = "";
#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
		pr_err("%s: kernel must blacklist external hubs\n",
		       musb_driver_name);
#endif
	}

	/* log release info */
	musb->hwvers = musb_read_hwvers(mbase);
	snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
		MUSB_HWVERS_MINOR(musb->hwvers),
		(musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
	pr_debug("%s: %sHDRC RTL version %s %s\n",
		 musb_driver_name, type, aRevision, aDate);

	/* configure ep0 */
	musb_configure_ep0(musb);

	/* discover endpoint configuration */
	musb->nr_endpoints = 1;
	musb->epmask = 1;

	if (musb->dyn_fifo)
		status = ep_config_from_table(musb);
	else
		status = ep_config_from_hw(musb);

	if (status < 0)
		return status;

	/* finish init, and print endpoint config */
	for (i = 0; i < musb->nr_endpoints; i++) {
		struct musb_hw_ep	*hw_ep = musb->endpoints + i;

		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
		if (musb->io.quirks & MUSB_IN_TUSB) {
			hw_ep->fifo_async = musb->async + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync = musb->sync + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync_va =
				musb->sync_va + 0x400 + musb->io.fifo_offset(i);

			if (i == 0)
				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
			else
				hw_ep->conf = mbase + 0x400 +
					(((i - 1) & 0xf) << 2);
		}
#endif

		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
		hw_ep->rx_reinit = 1;
		hw_ep->tx_reinit = 1;

		if (hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				hw_ep->is_shared_fifo ? "shared" : "tx",
				hw_ep->tx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_tx);
		}
		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				"rx",
				hw_ep->rx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_rx);
		}
		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
			musb_dbg(musb, "hw_ep %d not configured", i);
	}

	return 0;
}
/*-------------------------------------------------------------------------*/

/*
 * handle all the irqs defined by the HDRC core. for now we expect:  other
 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
 * will be assigned, and the irq will already have been acked.
 *
 * called in irq context with spinlock held, irqs blocked
 */
irqreturn_t musb_interrupt(struct musb *musb)
{
	irqreturn_t	retval = IRQ_NONE;
	unsigned long	status;
	unsigned long	epnum;
	u8		devctl;

	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
		return IRQ_NONE;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	trace_musb_isr(musb);

	/**
	 * According to Mentor Graphics' documentation, flowchart on page 98,
	 * IRQ should be handled as follows:
	 *
	 * . Resume IRQ
	 * . Session Request IRQ
	 * . VBUS Error IRQ
	 * . Suspend IRQ
	 * . Connect IRQ
	 * . Disconnect IRQ
	 * . Reset/Babble IRQ
	 * . SOF IRQ (we're not using this one)
	 * . Endpoint 0 IRQ
	 * . TX Endpoints
	 * . RX Endpoints
	 *
	 * We will be following that flowchart in order to avoid any problems
	 * that might arise with internal Finite State Machine.
	 */

	if (musb->int_usb)
		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);

	if (musb->int_tx & 1) {
		if (is_host_active(musb))
			retval |= musb_h_ep0_irq(musb);
		else
			retval |= musb_g_ep0_irq(musb);

		/* we have just handled endpoint 0 IRQ, clear it */
		musb->int_tx &= ~BIT(0);
	}

	status = musb->int_tx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_tx(musb, epnum);
		else
			musb_g_tx(musb, epnum);
	}

	status = musb->int_rx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_rx(musb, epnum);
		else
			musb_g_rx(musb, epnum);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(musb_interrupt);
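/*
 * In other words, the dispatch order within one invocation is always:
 * global/OTG events first (musb_stage0_irq), then endpoint 0 (which lives
 * in bit 0 of the TX interrupt status), then the remaining TX endpoints,
 * and the RX endpoints last, with each endpoint routed to the host- or
 * gadget-side handler depending on is_host_active().
 */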
#ifndef CONFIG_MUSB_PIO_ONLY
static bool use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");

void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	/* called with controller lock already held */

	if (!epnum) {
		if (!is_cppi_enabled(musb)) {
			/* endpoint 0 */
			if (is_host_active(musb))
				musb_h_ep0_irq(musb);
			else
				musb_g_ep0_irq(musb);
		}
	} else {
		/* endpoints 1..15 */
		if (transmit) {
			if (is_host_active(musb))
				musb_host_tx(musb, epnum);
			else
				musb_g_tx(musb, epnum);
		} else {
			/* receive */
			if (is_host_active(musb))
				musb_host_rx(musb, epnum);
			else
				musb_g_rx(musb, epnum);
		}
	}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);

#else
#define use_dma			0
#endif
static int (*musb_phy_callback)(enum musb_vbus_id_status status);

/*
 * musb_mailbox - optional phy notifier function
 * @status phy state change
 *
 * Optionally gets called from the USB PHY. Note that the USB PHY must be
 * disabled at the point the phy_callback is registered or unregistered.
 */
int musb_mailbox(enum musb_vbus_id_status status)
{
	if (musb_phy_callback)
		return musb_phy_callback(status);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(musb_mailbox);
/*-------------------------------------------------------------------------*/

static ssize_t
musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&musb->lock, flags);
	ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}

static ssize_t
musb_mode_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	int		status;

	spin_lock_irqsave(&musb->lock, flags);
	if (sysfs_streq(buf, "host"))
		status = musb_platform_set_mode(musb, MUSB_HOST);
	else if (sysfs_streq(buf, "peripheral"))
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
	else if (sysfs_streq(buf, "otg"))
		status = musb_platform_set_mode(musb, MUSB_OTG);
	else
		status = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	return (status == 0) ? n : status;
}
static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
static ssize_t
musb_vbus_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;

	if (sscanf(buf, "%lu", &val) < 1) {
		dev_err(dev, "Invalid VBUS timeout ms value\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&musb->lock, flags);
	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
		musb->is_active = 0;
	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
	spin_unlock_irqrestore(&musb->lock, flags);

	return n;
}

static ssize_t
musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;
	int		vbus;
	u8		devctl;

	spin_lock_irqsave(&musb->lock, flags);
	val = musb->a_wait_bcon;
	vbus = musb_platform_get_vbus_status(musb);
	if (vbus < 0) {
		/* Use default MUSB method by means of DEVCTL register */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS)
				== (3 << MUSB_DEVCTL_VBUS_SHIFT))
			vbus = 1;
		else
			vbus = 0;
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
			vbus ? "on" : "off", val);
}
static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
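/*
 * Typical use of these attributes from userspace (the exact sysfs path
 * below is only illustrative; it depends on how the glue layer names the
 * platform device):
 *
 *	cat /sys/bus/platform/devices/musb-hdrc.0/mode
 *	echo host > .../mode		# request a role change
 *	echo 5000 > .../vbus		# set T(a_wait_bcon) to 5000 msec
 *	cat .../vbus			# "Vbus on, timeout 5000 msec"
 *
 * The attribute semantics follow directly from the show/store handlers
 * above.
 */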
/* Gadget drivers can't know that a host is connected so they might want
 * to start SRP, but users can.  This allows userspace to trigger SRP.
 */
static ssize_t
musb_srp_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned short	srp;

	if (sscanf(buf, "%hu", &srp) != 1
			|| (srp != 1)) {
		dev_err(dev, "SRP: Value must be 1\n");
		return -EINVAL;
	}

	if (srp == 1)
		musb_g_wakeup(musb);

	return n;
}
static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
static struct attribute *musb_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_vbus.attr,
	&dev_attr_srp.attr,
	NULL
};

static const struct attribute_group musb_attr_group = {
	.attrs = musb_attributes,
};
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
	struct musb *musb = container_of(data, struct musb, irq_work);

	if (musb->xceiv->otg->state != musb->xceiv_old_state) {
		musb->xceiv_old_state = musb->xceiv->otg->state;
		sysfs_notify(&musb->controller->kobj, NULL, "mode");
	}
}
static void musb_recover_from_babble(struct musb *musb)
{
	int ret;
	u8 devctl;

	musb_disable_interrupts(musb);

	/*
	 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
	 * it some slack and wait for 10us.
	 */
	udelay(10);

	ret  = musb_platform_recover(musb);
	if (ret) {
		musb_enable_interrupts(musb);
		return;
	}

	/* drop session bit */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	/* tell usbcore about it */
	musb_root_disconnect(musb);

	/*
	 * When a babble condition occurs, the musb controller
	 * removes the session bit and the endpoint config is lost.
	 */
	if (musb->dyn_fifo)
		ret = ep_config_from_table(musb);
	else
		ret = ep_config_from_hw(musb);

	/* restart session */
	if (!ret)
		musb_start(musb);
}
/* --------------------------------------------------------------------------
 * Init support
 */

static struct musb *allocate_instance(struct device *dev,
		const struct musb_hdrc_config *config, void __iomem *mbase)
{
	struct musb		*musb;
	struct musb_hw_ep	*ep;
	int			epnum;
	int			ret;

	musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
	if (!musb)
		return NULL;

	INIT_LIST_HEAD(&musb->control);
	INIT_LIST_HEAD(&musb->in_bulk);
	INIT_LIST_HEAD(&musb->out_bulk);

	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
	musb->mregs = mbase;
	musb->ctrl_base = mbase;
	musb->nIrq = -ENODEV;
	musb->config = config;
	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
	for (epnum = 0, ep = musb->endpoints;
			epnum < musb->config->num_eps;
			epnum++, ep++) {
		ep->musb = musb;
		ep->epnum = epnum;
	}

	musb->controller = dev;

	ret = musb_host_alloc(musb);
	if (ret < 0)
		goto err_free;

	dev_set_drvdata(dev, musb);

	return musb;

err_free:
	return NULL;
}
static void musb_free(struct musb *musb)
{
	/* this has multiple entry modes. it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything's been de-activated.
	 */

#ifdef CONFIG_SYSFS
	sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif

	if (musb->nIrq >= 0) {
		if (musb->irq_wake)
			disable_irq_wake(musb->nIrq);
		free_irq(musb->nIrq, musb);
	}

	musb_host_free(musb);
}
static void musb_deassert_reset(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, deassert_reset_work.work);

	spin_lock_irqsave(&musb->lock, flags);

	if (musb->port1_status & USB_PORT_STAT_RESET)
		musb_port_reset(musb, false);

	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Perform generic per-controller initialization.
 *
 * @dev: the controller (already clocked, etc)
 * @nIrq: IRQ number
 * @ctrl: virtual address of controller registers,
 *	not yet corrected for platform-specific offsets
 */
static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
	int			status;
	struct musb		*musb;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);

	/* The driver might handle more features than the board; OK.
	 * Fail when the board needs a feature that's not enabled.
	 */
	if (!plat) {
		dev_err(dev, "no platform_data?\n");
		status = -ENODEV;
		goto fail0;
	}

	/* allocate */
	musb = allocate_instance(dev, plat->config, ctrl);
	if (!musb) {
		status = -ENOMEM;
		goto fail0;
	}

	spin_lock_init(&musb->lock);
	musb->board_set_power = plat->set_power;
	musb->min_power = plat->min_power;
	musb->ops = plat->platform_ops;
	musb->port_mode = plat->mode;

	/*
	 * Initialize the default IO functions. At least omap2430 needs
	 * these early. We initialize the platform specific IO functions
	 * later on.
	 */
	musb_readb = musb_default_readb;
	musb_writeb = musb_default_writeb;
	musb_readw = musb_default_readw;
	musb_writew = musb_default_writew;
	musb_readl = musb_default_readl;
	musb_writel = musb_default_writel;
	/* The musb_platform_init() call:
	 *   - adjusts musb->mregs
	 *   - sets the musb->isr
	 *   - may initialize an integrated transceiver
	 *   - initializes musb->xceiv, usually by otg_get_phy()
	 *   - stops powering VBUS
	 *
	 * There are various transceiver configurations.  Blackfin,
	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
	 * external/discrete ones in various flavors (twl4030 family,
	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
	 */
	status = musb_platform_init(musb);
	if (status < 0)
		goto fail1;

	if (!musb->isr) {
		status = -ENODEV;
		goto fail2;
	}

	if (musb->ops->quirks)
		musb->io.quirks = musb->ops->quirks;

	/* Most devices use indexed offset or flat offset */
	if (musb->io.quirks & MUSB_INDEXED_EP) {
		musb->io.ep_offset = musb_indexed_ep_offset;
		musb->io.ep_select = musb_indexed_ep_select;
	} else {
		musb->io.ep_offset = musb_flat_ep_offset;
		musb->io.ep_select = musb_flat_ep_select;
	}
	/* And override them with platform specific ops if specified. */
	if (musb->ops->ep_offset)
		musb->io.ep_offset = musb->ops->ep_offset;
	if (musb->ops->ep_select)
		musb->io.ep_select = musb->ops->ep_select;

	/* At least tusb6010 has its own offsets */
	if (musb->ops->ep_offset)
		musb->io.ep_offset = musb->ops->ep_offset;
	if (musb->ops->ep_select)
		musb->io.ep_select = musb->ops->ep_select;

	if (musb->ops->fifo_mode)
		fifo_mode = musb->ops->fifo_mode;
	else
		fifo_mode = 4;

	if (musb->ops->fifo_offset)
		musb->io.fifo_offset = musb->ops->fifo_offset;
	else
		musb->io.fifo_offset = musb_default_fifo_offset;
2072 if (musb
->ops
->busctl_offset
)
2073 musb
->io
.busctl_offset
= musb
->ops
->busctl_offset
;
2075 musb
->io
.busctl_offset
= musb_default_busctl_offset
;
2077 if (musb
->ops
->readb
)
2078 musb_readb
= musb
->ops
->readb
;
2079 if (musb
->ops
->writeb
)
2080 musb_writeb
= musb
->ops
->writeb
;
2081 if (musb
->ops
->readw
)
2082 musb_readw
= musb
->ops
->readw
;
2083 if (musb
->ops
->writew
)
2084 musb_writew
= musb
->ops
->writew
;
2085 if (musb
->ops
->readl
)
2086 musb_readl
= musb
->ops
->readl
;
2087 if (musb
->ops
->writel
)
2088 musb_writel
= musb
->ops
->writel
;
2090 #ifndef CONFIG_MUSB_PIO_ONLY
2091 if (!musb
->ops
->dma_init
|| !musb
->ops
->dma_exit
) {
2092 dev_err(dev
, "DMA controller not set\n");
2096 musb_dma_controller_create
= musb
->ops
->dma_init
;
2097 musb_dma_controller_destroy
= musb
->ops
->dma_exit
;
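/*
 * The DMA backend is chosen the same way.  A glue layer would typically
 * point these hooks at one of the existing controllers, for example the
 * Mentor/Inventra DMA engine (names as in musb_dma.h; check that header
 * for the variant your tree provides):
 *
 *	.dma_init	= musbhs_dma_controller_create,
 *	.dma_exit	= musbhs_dma_controller_destroy,
 *
 * With CONFIG_MUSB_PIO_ONLY these hooks are unused and every transfer
 * goes through the FIFO access functions set up below.
 */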
	if (musb->ops->read_fifo)
		musb->io.read_fifo = musb->ops->read_fifo;
	else
		musb->io.read_fifo = musb_default_read_fifo;

	if (musb->ops->write_fifo)
		musb->io.write_fifo = musb->ops->write_fifo;
	else
		musb->io.write_fifo = musb_default_write_fifo;

	if (!musb->xceiv->io_ops) {
		musb->xceiv->io_dev = musb->controller;
		musb->xceiv->io_priv = musb->mregs;
		musb->xceiv->io_ops = &musb_ulpi_access;
	}

	if (musb->ops->phy_callback)
		musb_phy_callback = musb->ops->phy_callback;

	/*
	 * We need musb_read/write functions initialized for PM.
	 * Note that at least 2430 glue needs autosuspend delay
	 * somewhere above 300 ms for the hardware to idle properly
	 * after disconnecting the cable in host mode. Let's use
	 * 500 ms for some margin.
	 */
	pm_runtime_use_autosuspend(musb->controller);
	pm_runtime_set_autosuspend_delay(musb->controller, 500);
	pm_runtime_enable(musb->controller);
	pm_runtime_get_sync(musb->controller);
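/*
 * The autosuspend setup above pairs with "mark busy, then put" on the
 * release side, so the controller only idles 500 ms after its last user
 * instead of immediately.  The usual pattern, as also used at the end of
 * this function, is:
 *
 *	pm_runtime_get_sync(musb->controller);		// resume, hold a ref
 *	// ... access controller registers ...
 *	pm_runtime_mark_last_busy(musb->controller);
 *	pm_runtime_put_autosuspend(musb->controller);	// may suspend later
 */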
	status = usb_phy_init(musb->xceiv);
	if (status < 0)
		goto err_usb_phy_init;

	if (use_dma && dev->dma_mask) {
		musb->dma_controller =
			musb_dma_controller_create(musb, musb->mregs);
		if (IS_ERR(musb->dma_controller)) {
			status = PTR_ERR(musb->dma_controller);
			goto fail2_5;
		}
	}

	/* be sure interrupts are disabled before connecting ISR */
	musb_platform_disable(musb);
	musb_generic_disable(musb);

	/* Init IRQ workqueue before request_irq */
	INIT_WORK(&musb->irq_work, musb_irq_work);
	INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
	INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
	/* setup musb parts of the core (especially endpoints) */
	status = musb_core_init(plat->config->multipoint
			? MUSB_CONTROLLER_MHDRC
			: MUSB_CONTROLLER_HDRC, musb);
	if (status < 0)
		goto fail3;

	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);

	/* attach to the IRQ */
	if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
		dev_err(dev, "request_irq %d failed!\n", nIrq);
		status = -ENODEV;
		goto fail3;
	}
	musb->nIrq = nIrq;

	/* FIXME this handles wakeup irqs wrong */
	if (enable_irq_wake(nIrq) == 0) {
		musb->irq_wake = 1;
		device_init_wakeup(dev, 1);
	} else {
		musb->irq_wake = 0;
	}

	/* program PHY to use external vBus if required */
	if (plat->extvbus) {
		u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);

		busctl |= MUSB_ULPI_USE_EXTVBUS;
		musb_write_ulpi_buscontrol(musb->mregs, busctl);
	}

	if (musb->xceiv->otg->default_a) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	} else {
		MUSB_DEV_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	}

	switch (musb->port_mode) {
	case MUSB_PORT_MODE_HOST:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_HOST);
		break;
	case MUSB_PORT_MODE_GADGET:
		status = musb_gadget_setup(musb);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
		break;
	case MUSB_PORT_MODE_DUAL_ROLE:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_gadget_setup(musb);
		if (status) {
			musb_host_cleanup(musb);
			goto fail3;
		}
		status = musb_platform_set_mode(musb, MUSB_OTG);
		break;
	default:
		dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
		break;
	}

	if (status < 0)
		goto fail3;
	status = musb_init_debugfs(musb);
	if (status < 0)
		goto fail4;

	status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
	if (status)
		goto fail5;

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;

fail5:
	musb_exit_debugfs(musb);

fail4:
	musb_gadget_cleanup(musb);
	musb_host_cleanup(musb);

fail3:
	cancel_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	if (musb->dma_controller)
		musb_dma_controller_destroy(musb->dma_controller);

fail2_5:
	usb_phy_shutdown(musb->xceiv);

err_usb_phy_init:
	pm_runtime_dont_use_autosuspend(musb->controller);
	pm_runtime_put_sync(musb->controller);
	pm_runtime_disable(musb->controller);

fail2:
	if (musb->irq_wake)
		device_init_wakeup(dev, 0);
	musb_platform_exit(musb);

fail1:
	dev_err(musb->controller,
		"musb_init_controller failed with status %d\n", status);

	musb_free(musb);

fail0:
	return status;
}
/*-------------------------------------------------------------------------*/

/* All implementations (PCI bridge to FPGA, VLYNQ, etc) should just
 * bridge to a platform device; this driver then suffices.
 */
static int musb_probe(struct platform_device *pdev)
{
	struct device	*dev = &pdev->dev;
	int		irq = platform_get_irq_byname(pdev, "mc");
	struct resource	*iomem;
	void __iomem	*base;

	if (irq <= 0)
		return -ENODEV;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return musb_init_controller(dev, irq, base);
}
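/*
 * In other words, a glue driver "bridges" to this core by registering a
 * child platform device bound to musb_driver_name ("musb-hdrc"), carrying
 * its register resource, an IRQ resource named "mc" (matching the
 * platform_get_irq_byname() above) and a struct musb_hdrc_platform_data.
 * A rough, hypothetical sketch with error handling elided:
 *
 *	struct platform_device *child;
 *
 *	child = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
 *	child->dev.parent = dev;			// the glue device
 *	platform_device_add_resources(child, res, nres); // mem + "mc" irq
 *	platform_device_add_data(child, pdata, sizeof(*pdata));
 *	platform_device_add(child);			// ends up in musb_probe()
 */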
static int musb_remove(struct platform_device *pdev)
{
	struct device	*dev = &pdev->dev;
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;

	/* this gets called on rmmod.
	 *  - Host mode: host may still be active
	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
	 *  - OTG mode: both roles are deactivated (or never-activated)
	 */
	musb_exit_debugfs(musb);

	cancel_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	pm_runtime_get_sync(musb->controller);
	musb_host_cleanup(musb);
	musb_gadget_cleanup(musb);
	spin_lock_irqsave(&musb->lock, flags);
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	spin_unlock_irqrestore(&musb->lock, flags);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	pm_runtime_dont_use_autosuspend(musb->controller);
	pm_runtime_put_sync(musb->controller);
	pm_runtime_disable(musb->controller);
	musb_platform_exit(musb);
	musb_phy_callback = NULL;

	if (musb->dma_controller)
		musb_dma_controller_destroy(musb->dma_controller);

	usb_phy_shutdown(musb->xceiv);
	musb_free(musb);
	device_init_wakeup(dev, 0);

	return 0;
}
static void musb_save_context(struct musb *musb)
{
	int i;
	void __iomem *musb_base = musb->mregs;
	void __iomem *epio;

	musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
	musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
	musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
	musb->context.power = musb_readb(musb_base, MUSB_POWER);
	musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
	musb->context.index = musb_readb(musb_base, MUSB_INDEX);
	musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep	*hw_ep;

		hw_ep = &musb->endpoints[i];
		if (!hw_ep)
			continue;

		epio = hw_ep->regs;
		if (!epio)
			continue;

		musb_writeb(musb_base, MUSB_INDEX, i);
		musb->context.index_regs[i].txmaxp =
			musb_readw(epio, MUSB_TXMAXP);
		musb->context.index_regs[i].txcsr =
			musb_readw(epio, MUSB_TXCSR);
		musb->context.index_regs[i].rxmaxp =
			musb_readw(epio, MUSB_RXMAXP);
		musb->context.index_regs[i].rxcsr =
			musb_readw(epio, MUSB_RXCSR);

		if (musb->dyn_fifo) {
			musb->context.index_regs[i].txfifoadd =
					musb_read_txfifoadd(musb_base);
			musb->context.index_regs[i].rxfifoadd =
					musb_read_rxfifoadd(musb_base);
			musb->context.index_regs[i].txfifosz =
					musb_read_txfifosz(musb_base);
			musb->context.index_regs[i].rxfifosz =
					musb_read_rxfifosz(musb_base);
		}

		musb->context.index_regs[i].txtype =
			musb_readb(epio, MUSB_TXTYPE);
		musb->context.index_regs[i].txinterval =
			musb_readb(epio, MUSB_TXINTERVAL);
		musb->context.index_regs[i].rxtype =
			musb_readb(epio, MUSB_RXTYPE);
		musb->context.index_regs[i].rxinterval =
			musb_readb(epio, MUSB_RXINTERVAL);

		musb->context.index_regs[i].txfunaddr =
			musb_read_txfunaddr(musb, i);
		musb->context.index_regs[i].txhubaddr =
			musb_read_txhubaddr(musb, i);
		musb->context.index_regs[i].txhubport =
			musb_read_txhubport(musb, i);

		musb->context.index_regs[i].rxfunaddr =
			musb_read_rxfunaddr(musb, i);
		musb->context.index_regs[i].rxhubaddr =
			musb_read_rxhubaddr(musb, i);
		musb->context.index_regs[i].rxhubport =
			musb_read_rxhubport(musb, i);
	}
}
static void musb_restore_context(struct musb *musb)
{
	int i;
	void __iomem *musb_base = musb->mregs;
	void __iomem *epio;
	u8 power;

	musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
	musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
	musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);

	/* Don't affect SUSPENDM/RESUME bits in POWER reg */
	power = musb_readb(musb_base, MUSB_POWER);
	power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
	musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
	power |= musb->context.power;
	musb_writeb(musb_base, MUSB_POWER, power);

	musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
	musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
	musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
	if (musb->context.devctl & MUSB_DEVCTL_SESSION)
		musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep	*hw_ep;

		hw_ep = &musb->endpoints[i];
		if (!hw_ep)
			continue;

		epio = hw_ep->regs;
		if (!epio)
			continue;

		musb_writeb(musb_base, MUSB_INDEX, i);
		musb_writew(epio, MUSB_TXMAXP,
			musb->context.index_regs[i].txmaxp);
		musb_writew(epio, MUSB_TXCSR,
			musb->context.index_regs[i].txcsr);
		musb_writew(epio, MUSB_RXMAXP,
			musb->context.index_regs[i].rxmaxp);
		musb_writew(epio, MUSB_RXCSR,
			musb->context.index_regs[i].rxcsr);

		if (musb->dyn_fifo) {
			musb_write_txfifosz(musb_base,
				musb->context.index_regs[i].txfifosz);
			musb_write_rxfifosz(musb_base,
				musb->context.index_regs[i].rxfifosz);
			musb_write_txfifoadd(musb_base,
				musb->context.index_regs[i].txfifoadd);
			musb_write_rxfifoadd(musb_base,
				musb->context.index_regs[i].rxfifoadd);
		}

		musb_writeb(epio, MUSB_TXTYPE,
				musb->context.index_regs[i].txtype);
		musb_writeb(epio, MUSB_TXINTERVAL,
				musb->context.index_regs[i].txinterval);
		musb_writeb(epio, MUSB_RXTYPE,
				musb->context.index_regs[i].rxtype);
		musb_writeb(epio, MUSB_RXINTERVAL,
				musb->context.index_regs[i].rxinterval);
		musb_write_txfunaddr(musb, i,
				musb->context.index_regs[i].txfunaddr);
		musb_write_txhubaddr(musb, i,
				musb->context.index_regs[i].txhubaddr);
		musb_write_txhubport(musb, i,
				musb->context.index_regs[i].txhubport);

		musb_write_rxfunaddr(musb, i,
				musb->context.index_regs[i].rxfunaddr);
		musb_write_rxhubaddr(musb, i,
				musb->context.index_regs[i].rxhubaddr);
		musb_write_rxhubport(musb, i,
				musb->context.index_regs[i].rxhubport);
	}

	musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
}
#ifdef CONFIG_PM

static int musb_suspend(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;

	musb_platform_disable(musb);
	musb_generic_disable(musb);

	spin_lock_irqsave(&musb->lock, flags);

	if (is_peripheral_active(musb)) {
		/* FIXME force disconnect unless we know USB will wake
		 * the system up quickly enough to respond ...
		 */
	} else if (is_host_active(musb)) {
		/* we know all the children are suspended; sometimes
		 * they will even be wakeup-enabled.
		 */
	}

	musb_save_context(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;
}
static int musb_resume(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);
	u8		devctl;
	u8		mask;

	/*
	 * For static cmos like DaVinci, register values were preserved
	 * unless for some reason the whole soc powered down or the USB
	 * module got reset through the PSC (vs just being disabled).
	 *
	 * For the DSPS glue layer though, a full register restore has to
	 * be done. As it shouldn't harm other platforms, we do it
	 * unconditionally.
	 */
	musb_restore_context(musb);

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
	if ((devctl & mask) != (musb->context.devctl & mask))
		musb->port1_status = 0;
	if (musb->need_finish_resume) {
		musb->need_finish_resume = 0;
		schedule_delayed_work(&musb->finish_resume_work,
				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}

	/*
	 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
	 * out of sleep.
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
static int musb_runtime_suspend(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);

	musb_save_context(musb);

	return 0;
}
static int musb_runtime_resume(struct device *dev)
{
	struct musb	*musb = dev_to_musb(dev);
	static int	first = 1;

	/*
	 * When pm_runtime_get_sync() is called for the first time in driver
	 * init, some of the structures used by the restore function are not
	 * yet initialized. But the clock needs to be enabled before any
	 * register access, so pm_runtime_get_sync() has to be called anyway.
	 * Also, a context restore without a prior save does not make sense.
	 */
	if (!first)
		musb_restore_context(musb);
	first = 0;

	if (musb->need_finish_resume) {
		musb->need_finish_resume = 0;
		schedule_delayed_work(&musb->finish_resume_work,
				msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}

	return 0;
}
static const struct dev_pm_ops musb_dev_pm_ops = {
	.suspend	= musb_suspend,
	.resume		= musb_resume,
	.runtime_suspend = musb_runtime_suspend,
	.runtime_resume	= musb_runtime_resume,
};

#define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
#else
#define MUSB_DEV_PM_OPS NULL
#endif
static struct platform_driver musb_driver = {
	.driver = {
		.name		= (char *)musb_driver_name,
		.bus		= &platform_bus_type,
		.pm		= MUSB_DEV_PM_OPS,
	},
	.probe		= musb_probe,
	.remove		= musb_remove,
};

module_platform_driver(musb_driver);
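/*
 * module_platform_driver() above expands to the usual registration
 * boilerplate, roughly:
 *
 *	static int __init musb_driver_init(void)
 *	{
 *		return platform_driver_register(&musb_driver);
 *	}
 *	module_init(musb_driver_init);
 *
 *	static void __exit musb_driver_exit(void)
 *	{
 *		platform_driver_unregister(&musb_driver);
 *	}
 *	module_exit(musb_driver_exit);
 *
 * so the core binds whenever a glue layer adds a "musb-hdrc" child device.
 */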