/*
 * MUSB OTG driver core code
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
 *
 * This consists of a Host Controller Driver (HCD) and a peripheral
 * controller driver implementing the "Gadget" API; OTG support is
 * in the works.  These are normal Linux-USB controller drivers which
 * use IRQs and have no dedicated thread.
 *
 * This version of the driver has only been used with products from
 * Texas Instruments.  Those products integrate the Inventra logic
 * with other DMA, IRQ, and bus modules, as well as other logic that
 * needs to be reflected in this driver.
 *
 * NOTE: the original Mentor code here was pretty much a collection
 * of mechanisms that don't seem to have been fully integrated/working
 * for *any* Linux kernel version.  This version aims at current Linux
 * 2.6 kernels.  Key open issues include:
 *
 *  - Lack of host-side transaction scheduling, for all transfer types.
 *    The hardware doesn't do it; instead, software must.
 *
 *    This is not an issue for OTG devices that don't support external
 *    hubs, but for more "normal" USB hosts it's a user issue that the
 *    "multipoint" support doesn't scale in the expected ways.  That
 *    includes DaVinci EVM in a common non-OTG mode.
 *
 *      * Control and bulk use dedicated endpoints, and there's as
 *        yet no mechanism to either (a) reclaim the hardware when
 *        peripherals are NAKing, which gets complicated with bulk
 *        endpoints, or (b) use more than a single bulk endpoint in
 *        each direction.
 *
 *            RESULT:  one device may be perceived as blocking another one.
 *
 *      * Interrupt and isochronous will dynamically allocate endpoint
 *        hardware, but (a) there's no record keeping for bandwidth;
 *        (b) in the common case that few endpoints are available, there
 *        is no mechanism to reuse endpoints to talk to multiple devices.
 *
 *            RESULT:  At one extreme, bandwidth can be overcommitted in
 *            some hardware configurations; no faults will be reported.
 *            At the other extreme, the bandwidth capabilities which do
 *            exist tend to be severely undercommitted.  You can't yet hook
 *            up both a keyboard and a mouse to an external USB hub.
 */

/*
 * This gets many kinds of configuration information:
 *	- Kconfig for everything user-configurable
 *	- platform_device for addressing, irq, and platform_data
 *	- platform_data is mostly for board-specific information
 *	  (plus recently, SOC or family details)
 *
 * Most of the conditional compilation will (someday) vanish.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>

#include "musb_core.h"
#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)

#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"

#define MUSB_VERSION "6.0"

#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION

#define MUSB_DRIVER_NAME "musb-hdrc"
const char musb_driver_name[] = MUSB_DRIVER_NAME;

MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
/*-------------------------------------------------------------------------*/

static inline struct musb *dev_to_musb(struct device *dev)
{
	return dev_get_drvdata(dev);
}

/*-------------------------------------------------------------------------*/
#ifndef CONFIG_BLACKFIN
static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r;
	u8	power;
	int	ret;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
	 */

	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);

	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}

static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r = 0;
	u8	power;
	int	ret = 0;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);

	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}
#else
#define musb_ulpi_read		NULL
#define musb_ulpi_write		NULL
#endif

static struct usb_phy_io_ops musb_ulpi_access = {
	.read = musb_ulpi_read,
	.write = musb_ulpi_write,
};
/*-------------------------------------------------------------------------*/

static u32 musb_default_fifo_offset(u8 epnum)
{
	return 0x20 + (epnum * 4);
}

/* "flat" mapping: each endpoint has its own i/o address */
static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
{
}

static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
{
	return 0x100 + (0x10 * epnum) + offset;
}

/* "indexed" mapping: INDEX register controls register bank select */
static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
{
	musb_writeb(mbase, MUSB_INDEX, epnum);
}

static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
{
	return 0x10 + offset;
}
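
/*
 * Worked example of the two register layouts above (the numbers follow
 * directly from the helpers; they are illustrative, not new hardware data):
 * with the "flat" model, endpoint 3's register bank starts at
 * 0x100 + 0x10 * 3 = 0x130, so a register at offset 0x4 lives at 0x134.
 * With the "indexed" model the driver first writes 3 to MUSB_INDEX and then
 * finds the same register in the fixed window at 0x10 + 0x4 = 0x14.
 */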
static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
{
	return __raw_readb(addr + offset);
}

static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
{
	__raw_writeb(data, addr + offset);
}

static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
{
	return __raw_readw(addr + offset);
}

static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
{
	__raw_writew(data, addr + offset);
}

static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
{
	return __raw_readl(addr + offset);
}

static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
	__raw_writel(data, addr + offset);
}
/*
 * Load an endpoint's FIFO
 */
static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
				    const u8 *src)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	prefetch((u8 *)src);

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'T', hw_ep->epnum, fifo, len, src);

	/* we can't assume unaligned reads work */
	if (likely((0x01 & (unsigned long) src) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned source address */
		if ((0x02 & (unsigned long) src) == 0) {
			if (len >= 4) {
				iowrite32_rep(fifo, src + index, len >> 2);
				index += len & ~0x03;
			}
			if (len & 0x02) {
				musb_writew(fifo, 0, *(u16 *)&src[index]);
				index += 2;
			}
		} else {
			if (len >= 2) {
				iowrite16_rep(fifo, src + index, len >> 1);
				index += len & ~0x01;
			}
		}
		if (len & 0x01)
			musb_writeb(fifo, 0, src[index]);
	} else {
		/* byte aligned */
		iowrite8_rep(fifo, src, len);
	}
}
/*
 * Unload an endpoint's FIFO
 */
static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'R', hw_ep->epnum, fifo, len, dst);

	/* we can't assume unaligned writes work */
	if (likely((0x01 & (unsigned long) dst) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) dst) == 0) {
			if (len >= 4) {
				ioread32_rep(fifo, dst, len >> 2);
				index = len & ~0x03;
			}
			if (len & 0x02) {
				*(u16 *)&dst[index] = musb_readw(fifo, 0);
				index += 2;
			}
		} else {
			if (len >= 2) {
				ioread16_rep(fifo, dst, len >> 1);
				index = len & ~0x01;
			}
		}
		if (len & 0x01)
			dst[index] = musb_readb(fifo, 0);
	} else {
		/* byte aligned */
		ioread8_rep(fifo, dst, len);
	}
}
/*
 * Old style IO functions
 */
u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readb);

void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);

u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readw);

void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);

u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readl);

void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
EXPORT_SYMBOL_GPL(musb_writel);
/*
 * New style IO functions
 */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
}

void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
	return hw_ep->musb->io.write_fifo(hw_ep, len, src);
}

/*-------------------------------------------------------------------------*/
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 musb_test_packet[53] = {
	/* implicit SYNC then DATA0 to start */

	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
	/* JJJJJJJKKKKKKK x8 */
	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
	/* JKKKKKKK x10, JK */
	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e

	/* implicit CRC16 then EOP to end */
};
void musb_load_testpacket(struct musb *musb)
{
	void __iomem	*regs = musb->endpoints[0].regs;

	musb_ep_select(musb->mregs, 0);
	musb_write_fifo(musb->control_ep,
			sizeof(musb_test_packet), musb_test_packet);
	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
}
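
/*
 * Sketch of how the packet above gets onto the bus (hedged; the actual
 * trigger lives in the host/virthub test mode handling, not in this file):
 * after the FIFO has been loaded, writing MUSB_TEST_PACKET to the
 * MUSB_TESTMODE register makes the core repeat the USB 2.0 test packet:
 *
 *	musb_load_testpacket(musb);
 *	musb_writeb(musb->mregs, MUSB_TESTMODE, MUSB_TEST_PACKET);
 */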
/*-------------------------------------------------------------------------*/

/*
 * Handles OTG hnp timeouts, such as b_ase0_brst
 */
static void musb_otg_timer_func(unsigned long data)
{
	struct musb	*musb = (struct musb *)data;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_WAIT_ACON:
		dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
		musb_g_disconnect(musb);
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->is_active = 0;
		break;
	case OTG_STATE_A_SUSPEND:
	case OTG_STATE_A_WAIT_BCON:
		dev_dbg(musb->controller, "HNP: %s timeout\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		musb_platform_set_vbus(musb, 0);
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
		break;
	default:
		dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Stops the HNP transition. Caller must take care of locking.
 */
void musb_hnp_stop(struct musb *musb)
{
	struct usb_hcd	*hcd = musb->hcd;
	void __iomem	*mbase = musb->mregs;
	u8	reg;

	dev_dbg(musb->controller, "HNP: stop from %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_PERIPHERAL:
		musb_g_disconnect(musb);
		dev_dbg(musb->controller, "HNP: back to %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
		break;
	case OTG_STATE_B_HOST:
		dev_dbg(musb->controller, "HNP: Disabling HR\n");
		if (hcd)
			hcd->self.is_b_host = 0;
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		MUSB_DEV_MODE(musb);
		reg = musb_readb(mbase, MUSB_POWER);
		reg |= MUSB_POWER_SUSPENDM;
		musb_writeb(mbase, MUSB_POWER, reg);
		/* REVISIT: Start SESSION_REQUEST here? */
		break;
	default:
		dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
			usb_otg_state_string(musb->xceiv->otg->state));
	}

	/*
	 * When returning to A state after HNP, avoid hub_port_rebounce(),
	 * which causes occasional OPT A "Did not receive reset after connect"
	 * errors.
	 */
	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}

static void musb_recover_from_babble(struct musb *musb);
/*
 * Interrupt Service Routine to record USB "global" interrupts.
 * Since these do not happen often and signify things of
 * paramount importance, it seems OK to check them individually;
 * the order of the tests is specified in the manual
 *
 * @param musb instance pointer
 * @param int_usb register contents
 * @param devctl
 */
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
				u8 devctl)
{
	irqreturn_t handled = IRQ_NONE;

	dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
		int_usb);

	/* in host mode, the peripheral may issue remote wakeup.
	 * in peripheral mode, the host may resume the link.
	 * spurious RESUME irqs happen too, paired with SUSPEND.
	 */
	if (int_usb & MUSB_INTR_RESUME) {
		handled = IRQ_HANDLED;
		dev_dbg(musb->controller, "RESUME (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));

		if (devctl & MUSB_DEVCTL_HM) {
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* remote wakeup? later, GetPortStatus
				 * will stop RESUME signaling
				 */

				musb->port1_status |=
						(USB_PORT_STAT_C_SUSPEND << 16)
						| MUSB_PORT_STAT_RESUME;
				musb->rh_timer = jiffies
					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
				musb->need_finish_resume = 1;

				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb->is_active = 1;
				musb_host_resume_root_hub(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				musb->is_active = 1;
				MUSB_DEV_MODE(musb);
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"host",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		} else {
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* possibly DISCONNECT is upcoming */
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb_host_resume_root_hub(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
			case OTG_STATE_B_PERIPHERAL:
				/* disconnect while suspended?  we may
				 * not get a disconnect irq...
				 */
				if ((devctl & MUSB_DEVCTL_VBUS)
						!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
						) {
					musb->int_usb |= MUSB_INTR_DISCONNECT;
					musb->int_usb &= ~MUSB_INTR_SUSPEND;
					break;
				}
				musb_g_resume(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->int_usb &= ~MUSB_INTR_SUSPEND;
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"peripheral",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}
	/* see manual for the order of the tests */
	if (int_usb & MUSB_INTR_SESSREQ) {
		void __iomem *mbase = musb->mregs;

		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
			dev_dbg(musb->controller, "SessReq while on B state\n");
			return IRQ_HANDLED;
		}

		dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
			usb_otg_state_string(musb->xceiv->otg->state));

		/* IRQ arrives from ID pin sense or (later, if VBUS power
		 * is removed) SRP.  responses are time critical:
		 *  - turn on VBUS (with silicon-specific mechanism)
		 *  - go through A_WAIT_VRISE
		 *  - ... to A_WAIT_BCON.
		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
		 */
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
		musb->ep0_stage = MUSB_EP0_START;
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		musb_platform_set_vbus(musb, 1);

		handled = IRQ_HANDLED;
	}
	if (int_usb & MUSB_INTR_VBUSERROR) {
		int	ignore = 0;

		/* During connection as an A-Device, we may see a short
		 * current spike causing voltage drop, because of cable
		 * and peripheral capacitance combined with vbus draw.
		 * (So: less common with truly self-powered devices, where
		 * vbus doesn't act like a power supply.)
		 *
		 * Such spikes are short; usually less than ~500 usec, max
		 * of ~2 msec.  That is, they're not sustained overcurrent
		 * errors, though they're reported using VBUSERROR irqs.
		 *
		 * Workarounds:  (a) hardware: use self powered devices.
		 * (b) software:  ignore non-repeated VBUS errors.
		 *
		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
		 * make trouble here, keeping VBUS < 4.4V ?
		 */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
			/* recovery is dicey once we've gotten past the
			 * initial stages of enumeration, but if VBUS
			 * stayed ok at the other end of the link, and
			 * another reset is due (at least for high speed,
			 * to redo the chirp etc), it might work OK...
			 */
		case OTG_STATE_A_WAIT_BCON:
		case OTG_STATE_A_WAIT_VRISE:
			if (musb->vbuserr_retry) {
				void __iomem *mbase = musb->mregs;

				musb->vbuserr_retry--;
				ignore = 1;
				devctl |= MUSB_DEVCTL_SESSION;
				musb_writeb(mbase, MUSB_DEVCTL, devctl);
			} else {
				musb->port1_status |=
					  USB_PORT_STAT_OVERCURRENT
					| (USB_PORT_STAT_C_OVERCURRENT << 16);
			}
			break;
		default:
			break;
		}

		dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
				"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
				usb_otg_state_string(musb->xceiv->otg->state),
				devctl,
				({ char *s;
				switch (devctl & MUSB_DEVCTL_VBUS) {
				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<SessEnd"; break;
				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<AValid"; break;
				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<VBusValid"; break;
				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
				default:
					s = "VALID"; break;
				} s; }),
				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
				musb->port1_status);

		/* go through A_WAIT_VFALL then start a new session */
		if (!ignore)
			musb_platform_set_vbus(musb, 0);
		handled = IRQ_HANDLED;
	}
	if (int_usb & MUSB_INTR_SUSPEND) {
		dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x\n",
			usb_otg_state_string(musb->xceiv->otg->state), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_PERIPHERAL:
			/* We also come here if the cable is removed, since
			 * this silicon doesn't report ID-no-longer-grounded.
			 *
			 * We depend on T(a_wait_bcon) to shut us down, and
			 * hope users don't do anything dicey during this
			 * undesired detour through A_WAIT_BCON.
			 */
			musb_hnp_stop(musb);
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon
						? : OTG_TIME_A_WAIT_BCON));
			break;
		case OTG_STATE_B_IDLE:
			if (!musb->is_active)
				break;
		case OTG_STATE_B_PERIPHERAL:
			musb_g_suspend(musb);
			musb->is_active = musb->g.b_hnp_enable;
			if (musb->is_active) {
				musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
				dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(
							OTG_TIME_B_ASE0_BRST));
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_A_HOST:
			musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
			musb->is_active = musb->hcd->self.b_hnp_enable;
			break;
		case OTG_STATE_B_HOST:
			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
			dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
			break;
		default:
			/* "should not happen" */
			musb->is_active = 0;
			break;
		}
	}
	if (int_usb & MUSB_INTR_CONNECT) {
		struct usb_hcd *hcd = musb->hcd;

		handled = IRQ_HANDLED;
		musb->is_active = 1;

		musb->ep0_stage = MUSB_EP0_START;

		musb->intrtxe = musb->epmask;
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb->intrrxe = musb->epmask & 0xfffe;
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
					|USB_PORT_STAT_HIGH_SPEED
					|USB_PORT_STAT_ENABLE
					);
		musb->port1_status |= USB_PORT_STAT_CONNECTION
					|(USB_PORT_STAT_C_CONNECTION << 16);

		/* high vs full speed is just a guess until after reset */
		if (devctl & MUSB_DEVCTL_LSDEV)
			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;

		/* indicate new connection to OTG machine */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_B_PERIPHERAL:
			if (int_usb & MUSB_INTR_SUSPEND) {
				dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
				int_usb &= ~MUSB_INTR_SUSPEND;
				goto b_host;
			} else
				dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
			break;
		case OTG_STATE_B_WAIT_ACON:
			dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
b_host:
			musb->xceiv->otg->state = OTG_STATE_B_HOST;
			if (musb->hcd)
				musb->hcd->self.is_b_host = 1;
			del_timer(&musb->otg_timer);
			break;
		default:
			if ((devctl & MUSB_DEVCTL_VBUS)
					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				if (hcd)
					hcd->self.is_b_host = 0;
			}
			break;
		}

		musb_host_poke_root_hub(musb);

		dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
				usb_otg_state_string(musb->xceiv->otg->state), devctl);
	}
	if (int_usb & MUSB_INTR_DISCONNECT) {
		dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
				usb_otg_state_string(musb->xceiv->otg->state),
				MUSB_MODE(musb), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
		case OTG_STATE_A_SUSPEND:
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_B_HOST:
			/* REVISIT this behaves for "real disconnect"
			 * cases; make sure the other transitions
			 * from B_HOST act right too.  The B_HOST code
			 * in hnp_stop() is currently not used...
			 */
			musb_root_disconnect(musb);
			if (musb->hcd)
				musb->hcd->self.is_b_host = 0;
			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
			MUSB_DEV_MODE(musb);
			musb_g_disconnect(musb);
			break;
		case OTG_STATE_A_PERIPHERAL:
			musb_hnp_stop(musb);
			musb_root_disconnect(musb);
			/* FALLTHROUGH */
		case OTG_STATE_B_WAIT_ACON:
			/* FALLTHROUGH */
		case OTG_STATE_B_PERIPHERAL:
		case OTG_STATE_B_IDLE:
			musb_g_disconnect(musb);
			break;
		default:
			WARNING("unhandled DISCONNECT transition (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));
			break;
		}
	}
	/* mentor saves a bit: bus reset and babble share the same irq.
	 * only host sees babble; only peripheral sees bus reset.
	 */
	if (int_usb & MUSB_INTR_RESET) {
		handled = IRQ_HANDLED;
		if (devctl & MUSB_DEVCTL_HM) {
			/*
			 * When BABBLE happens, what we can do depends on which
			 * platform MUSB is running on, because some platforms
			 * implemented proprietary means for 'recovering' from
			 * Babble conditions. One such platform is AM335x. In
			 * most cases, however, the only thing we can do is
			 * drop the session.
			 */
			dev_err(musb->controller, "Babble\n");

			if (is_host_active(musb))
				musb_recover_from_babble(musb);
		} else {
			dev_dbg(musb->controller, "BUS RESET as %s\n",
				usb_otg_state_string(musb->xceiv->otg->state));
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				musb_g_reset(musb);
				/* FALLTHROUGH */
			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
				/* never use invalid T(a_wait_bcon) */
				dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
					usb_otg_state_string(musb->xceiv->otg->state),
					TA_WAIT_BCON(musb));
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
				break;
			case OTG_STATE_A_PERIPHERAL:
				del_timer(&musb->otg_timer);
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
					usb_otg_state_string(musb->xceiv->otg->state));
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				/* FALLTHROUGH */
			case OTG_STATE_B_PERIPHERAL:
				musb_g_reset(musb);
				break;
			default:
				dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}
#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
 * supporting transfer phasing to prevent exceeding ISO bandwidth
 * limits of a given frame or microframe.
 *
 * It's not needed for peripheral side, which dedicates endpoints;
 * though it _might_ use SOF irqs for other purposes.
 *
 * And it's not currently needed for host side, which also dedicates
 * endpoints, relies on TX/RX interval registers, and isn't claimed
 * to support ISO transfers yet.
 */
	if (int_usb & MUSB_INTR_SOF) {
		void __iomem *mbase = musb->mregs;
		struct musb_hw_ep	*ep;
		u8 epnum;
		u16 frame;

		dev_dbg(musb->controller, "START_OF_FRAME\n");
		handled = IRQ_HANDLED;

		/* start any periodic Tx transfers waiting for current frame */
		frame = musb_readw(mbase, MUSB_FRAME);
		ep = musb->endpoints;
		for (epnum = 1; (epnum < musb->nr_endpoints)
					&& (musb->epmask >= (1 << epnum));
				epnum++, ep++) {
			/*
			 * FIXME handle framecounter wraps (12 bits)
			 * eliminate duplicated StartUrb logic
			 */
			if (ep->dwWaitFrame >= frame) {
				ep->dwWaitFrame = 0;
				pr_debug("SOF --> periodic TX%s on %d\n",
					ep->tx_channel ? " DMA" : "",
					epnum);
				if (!ep->tx_channel)
					musb_h_tx_start(musb, epnum);
				else
					cppi_hostdma_start(musb, epnum);
			}
		}		/* end of for loop */
	}
#endif

	schedule_work(&musb->irq_work);

	return handled;
}

/*-------------------------------------------------------------------------*/
static void musb_disable_interrupts(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;
	u16	temp;

	/* disable interrupts */
	musb_writeb(mbase, MUSB_INTRUSBE, 0);
	musb->intrtxe = 0;
	musb_writew(mbase, MUSB_INTRTXE, 0);
	musb->intrrxe = 0;
	musb_writew(mbase, MUSB_INTRRXE, 0);

	/* flush pending interrupts */
	temp = musb_readb(mbase, MUSB_INTRUSB);
	temp = musb_readw(mbase, MUSB_INTRTX);
	temp = musb_readw(mbase, MUSB_INTRRX);
}
static void musb_enable_interrupts(struct musb *musb)
{
	void __iomem	*regs = musb->mregs;

	/* Set INT enable registers, enable interrupts */
	musb->intrtxe = musb->epmask;
	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
	musb->intrrxe = musb->epmask & 0xfffe;
	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
}

static void musb_generic_disable(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;

	musb_disable_interrupts(musb);

	/* off */
	musb_writeb(mbase, MUSB_DEVCTL, 0);
}
/*
 * Program the HDRC to start (enable interrupts, dma, etc.).
 */
void musb_start(struct musb *musb)
{
	void __iomem	*regs = musb->mregs;
	u8		devctl = musb_readb(regs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "<== devctl %02x\n", devctl);

	musb_enable_interrupts(musb);
	musb_writeb(regs, MUSB_TESTMODE, 0);

	/* put into basic highspeed mode and start session */
	musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
						| MUSB_POWER_HSENAB
						/* ENSUSPEND wedges tusb */
						/* | MUSB_POWER_ENSUSPEND */
						);

	musb->is_active = 0;
	devctl = musb_readb(regs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;

	/* session started after:
	 * (a) ID-grounded irq, host mode;
	 * (b) vbus present/connect IRQ, peripheral mode;
	 * (c) peripheral initiates, using SRP
	 */
	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
		musb->is_active = 1;
	} else {
		devctl |= MUSB_DEVCTL_SESSION;
	}

	musb_platform_enable(musb);
	musb_writeb(regs, MUSB_DEVCTL, devctl);
}
/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{
	/* stop IRQs, timers, ... */
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	dev_dbg(musb->controller, "HDRC disabled\n");

	/* FIXME
	 *  - mark host and/or peripheral drivers unusable/inactive
	 *  - disable DMA (and enable it in HdrcStart)
	 *  - make sure we can musb_start() after musb_stop(); with
	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
	 *    ...
	 */
	musb_platform_try_idle(musb, 0);
}
static void musb_shutdown(struct platform_device *pdev)
{
	struct musb	*musb = dev_to_musb(&pdev->dev);
	unsigned long	flags;

	pm_runtime_get_sync(musb->controller);

	musb_host_cleanup(musb);
	musb_gadget_cleanup(musb);

	spin_lock_irqsave(&musb->lock, flags);
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	spin_unlock_irqrestore(&musb->lock, flags);

	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	musb_platform_exit(musb);

	pm_runtime_put(musb->controller);
	/* FIXME power down */
}

/*-------------------------------------------------------------------------*/
/*
 * The silicon either has hard-wired endpoint configurations, or else
 * "dynamic fifo" sizing.  The driver has support for both, though at this
 * writing only the dynamic sizing is very well tested.   Since we switched
 * away from compile-time hardware parameters, we can no longer rely on
 * dead code elimination to leave only the relevant one in the object file.
 *
 * We don't currently use dynamic fifo setup capability to do anything
 * more than selecting one of a bunch of predefined configurations.
 */
static ushort fifo_mode;

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
/*
 * tables defining fifo_mode values.  define more if you like.
 * for host side, make sure both halves of ep1 are set up.
 */

/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 7, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 7, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 8, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 8, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};

/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 6, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 7, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 7, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 8, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 8, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 9, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 9, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
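
/*
 * Quick arithmetic check of the "fits in ..." claims above, derived only
 * from the table entries themselves: mode 5 reserves
 * 5 * 2 * 512 + 7 * 2 * 32 + 512 + 1024 + 1024 = 8128 bytes, which together
 * with the 64-byte ep0 FIFO configured below is exactly 8KB.  Mode 4 adds
 * up to 9 * 2 * 512 + 3 * (256 + 64) + 4096 + 1024 + 1024 = 16320 bytes,
 * i.e. 16KB once ep0 is included.  A BUF_DOUBLE entry reserves twice its
 * maxpacket, which is why mode 1 needs 4KB where mode 0 targets 2KB.
 */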
/*
 * configure a fifo; for non-shared endpoints, this may be called
 * once for a tx fifo and once for an rx fifo.
 *
 * returns negative errno or offset for next fifo.
 */
static int
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
		const struct musb_fifo_cfg *cfg, u16 offset)
{
	void __iomem	*mbase = musb->mregs;
	int	size = 0;
	u16	maxpacket = cfg->maxpacket;
	u16	c_off = offset >> 3;
	u8	c_size;

	/* expect hw_ep has already been zero-initialized */

	size = ffs(max(maxpacket, (u16) 8)) - 1;
	maxpacket = 1 << size;

	c_size = size - 3;
	if (cfg->mode == BUF_DOUBLE) {
		if ((offset + (maxpacket << 1)) >
				(1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
		c_size |= MUSB_FIFOSZ_DPB;
	} else {
		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
	}

	/* configure the FIFO */
	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);

	/* EP0 reserved endpoint for control, bidirectional;
	 * EP1 reserved for bulk, two unidirectional halves.
	 */
	if (hw_ep->epnum == 1)
		musb->bulk_ep = hw_ep;
	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
	switch (cfg->style) {
	case FIFO_TX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_tx = maxpacket;
		break;
	case FIFO_RX:
		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;
		break;
	case FIFO_RXTX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;

		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
		hw_ep->max_packet_sz_tx = maxpacket;

		hw_ep->is_shared_fifo = true;
		break;
	}

	/* NOTE rx and tx endpoint irqs aren't managed separately,
	 * which happens to be ok
	 */
	musb->epmask |= (1 << hw_ep->epnum);

	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
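
/*
 * Worked example of the allocation math above (the numbers follow from the
 * code, not from new hardware data): for a 512-byte FIFO_TX entry,
 * ffs(512) - 1 gives size = 9, so maxpacket stays 512 and c_size = 9 - 3 = 6
 * is written to TXFIFOSZ, while the FIFO address register gets offset >> 3.
 * A single-buffered fifo advances the running offset by 512 bytes, a
 * BUF_DOUBLE one (MUSB_FIFOSZ_DPB set) by 1024.
 */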
static struct musb_fifo_cfg ep0_cfg = {
	.style = FIFO_RXTX, .maxpacket = 64,
};

static int ep_config_from_table(struct musb *musb)
{
	const struct musb_fifo_cfg	*cfg;
	unsigned		i, n;
	int			offset;
	struct musb_hw_ep	*hw_ep = musb->endpoints;

	if (musb->config->fifo_cfg) {
		cfg = musb->config->fifo_cfg;
		n = musb->config->fifo_cfg_size;
		goto done;
	}

	switch (fifo_mode) {
	default:
		fifo_mode = 0;
		/* FALLTHROUGH */
	case 0:
		cfg = mode_0_cfg;
		n = ARRAY_SIZE(mode_0_cfg);
		break;
	case 1:
		cfg = mode_1_cfg;
		n = ARRAY_SIZE(mode_1_cfg);
		break;
	case 2:
		cfg = mode_2_cfg;
		n = ARRAY_SIZE(mode_2_cfg);
		break;
	case 3:
		cfg = mode_3_cfg;
		n = ARRAY_SIZE(mode_3_cfg);
		break;
	case 4:
		cfg = mode_4_cfg;
		n = ARRAY_SIZE(mode_4_cfg);
		break;
	case 5:
		cfg = mode_5_cfg;
		n = ARRAY_SIZE(mode_5_cfg);
		break;
	}

	printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
			musb_driver_name, fifo_mode);

done:
	offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
	/* assert(offset > 0) */

	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
	 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
	 */

	for (i = 0; i < n; i++) {
		u8	epn = cfg->hw_ep_num;

		if (epn >= musb->config->num_eps) {
			pr_debug("%s: invalid ep %d\n",
					musb_driver_name, epn);
			return -EINVAL;
		}
		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
		if (offset < 0) {
			pr_debug("%s: mem overrun, ep %d\n",
					musb_driver_name, epn);
			return offset;
		}

		epn++;
		musb->nr_endpoints = max(epn, musb->nr_endpoints);
	}

	printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
			musb_driver_name,
			n + 1, musb->config->num_eps * 2 - 1,
			offset, (1 << (musb->config->ram_bits + 2)));

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
/*
 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
 * @param musb the controller
 */
static int ep_config_from_hw(struct musb *musb)
{
	u8 epnum = 0;
	struct musb_hw_ep *hw_ep;
	void __iomem *mbase = musb->mregs;
	int ret = 0;

	dev_dbg(musb->controller, "<== static silicon ep config\n");

	/* FIXME pick up ep0 maxpacket size */

	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
		musb_ep_select(mbase, epnum);
		hw_ep = musb->endpoints + epnum;

		ret = musb_read_fifosize(musb, hw_ep, epnum);
		if (ret < 0)
			break;

		/* FIXME set up hw_ep->{rx,tx}_double_buffered */

		/* pick an RX/TX endpoint for bulk */
		if (hw_ep->max_packet_sz_tx < 512
				|| hw_ep->max_packet_sz_rx < 512)
			continue;

		/* REVISIT:  this algorithm is lazy, we should at least
		 * try to pick a double buffered endpoint.
		 */
		if (musb->bulk_ep)
			continue;
		musb->bulk_ep = hw_ep;
	}

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}

enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
 * configure endpoints, or take their config from silicon
 */
static int musb_core_init(u16 musb_type, struct musb *musb)
{
	u8 reg;
	char *type;
	char aInfo[90], aRevision[32], aDate[12];
	void __iomem	*mbase = musb->mregs;
	int		status = 0;
	int		i;

	/* log core options (read using indexed model) */
	reg = musb_read_configdata(mbase);

	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
		strcat(aInfo, ", dyn FIFOs");
		musb->dyn_fifo = true;
	}
	if (reg & MUSB_CONFIGDATA_MPRXE) {
		strcat(aInfo, ", bulk combine");
		musb->bulk_combine = true;
	}
	if (reg & MUSB_CONFIGDATA_MPTXE) {
		strcat(aInfo, ", bulk split");
		musb->bulk_split = true;
	}
	if (reg & MUSB_CONFIGDATA_HBRXE) {
		strcat(aInfo, ", HB-ISO Rx");
		musb->hb_iso_rx = true;
	}
	if (reg & MUSB_CONFIGDATA_HBTXE) {
		strcat(aInfo, ", HB-ISO Tx");
		musb->hb_iso_tx = true;
	}
	if (reg & MUSB_CONFIGDATA_SOFTCONE)
		strcat(aInfo, ", SoftConn");

	printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
			musb_driver_name, reg, aInfo);

	aDate[0] = 0;
	if (MUSB_CONTROLLER_MHDRC == musb_type) {
		musb->is_multipoint = 1;
		type = "M";
	} else {
		musb->is_multipoint = 0;
		type = "";
#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
		printk(KERN_ERR
			"%s: kernel must blacklist external hubs\n",
			musb_driver_name);
#endif
	}

	/* log release info */
	musb->hwvers = musb_read_hwvers(mbase);
	snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
		MUSB_HWVERS_MINOR(musb->hwvers),
		(musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
	printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
			musb_driver_name, type, aRevision, aDate);

	/* configure ep0 */
	musb_configure_ep0(musb);

	/* discover endpoint configuration */
	musb->nr_endpoints = 1;
	musb->epmask = 1;

	if (musb->dyn_fifo)
		status = ep_config_from_table(musb);
	else
		status = ep_config_from_hw(musb);

	if (status < 0)
		return status;

	/* finish init, and print endpoint config */
	for (i = 0; i < musb->nr_endpoints; i++) {
		struct musb_hw_ep	*hw_ep = musb->endpoints + i;

		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
		if (musb->io.quirks & MUSB_IN_TUSB) {
			hw_ep->fifo_async = musb->async + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync = musb->sync + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync_va =
				musb->sync_va + 0x400 + musb->io.fifo_offset(i);

			if (i == 0)
				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
			else
				hw_ep->conf = mbase + 0x400 +
					(((i - 1) & 0xf) << 2);
		}
#endif

		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
		hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
		hw_ep->rx_reinit = 1;
		hw_ep->tx_reinit = 1;

		if (hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller,
				"%s: hw_ep %d%s, %smax %d\n",
				musb_driver_name, i,
				hw_ep->is_shared_fifo ? "shared" : "tx",
				hw_ep->tx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_tx);
		}
		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
			dev_dbg(musb->controller,
				"%s: hw_ep %d%s, %smax %d\n",
				musb_driver_name, i,
				"rx",
				hw_ep->rx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_rx);
		}
		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
			dev_dbg(musb->controller, "hw_ep %d not configured\n", i);
	}

	return 0;
}
/*-------------------------------------------------------------------------*/

/*
 * handle all the irqs defined by the HDRC core. for now we expect:  other
 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
 * will be assigned, and the irq will already have been acked.
 *
 * called in irq context with spinlock held, irqs blocked
 */
irqreturn_t musb_interrupt(struct musb *musb)
{
	irqreturn_t	retval = IRQ_NONE;
	unsigned long	status;
	unsigned long	epnum;
	u8		devctl;

	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
		return 0;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n",
		is_host_active(musb) ? "host" : "peripheral",
		musb->int_usb, musb->int_tx, musb->int_rx);

	/**
	 * According to Mentor Graphics' documentation, flowchart on page 98,
	 * IRQ should be handled as follows:
	 *
	 * . Resume IRQ
	 * . Session Request IRQ
	 * . VBUS Error IRQ
	 * . Suspend IRQ
	 * . Connect IRQ
	 * . Disconnect IRQ
	 * . Reset/Babble IRQ
	 * . SOF IRQ (we're not using this one)
	 * . Endpoint 0 IRQ
	 * . TX Endpoints
	 * . RX Endpoints
	 *
	 * We will be following that flowchart in order to avoid any problems
	 * that might arise with internal Finite State Machine.
	 */

	if (musb->int_usb)
		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);

	if (musb->int_tx & 1) {
		if (is_host_active(musb))
			retval |= musb_h_ep0_irq(musb);
		else
			retval |= musb_g_ep0_irq(musb);

		/* we have just handled endpoint 0 IRQ, clear it */
		musb->int_tx &= ~BIT(0);
	}

	status = musb->int_tx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_tx(musb, epnum);
		else
			musb_g_tx(musb, epnum);
	}

	status = musb->int_rx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_rx(musb, epnum);
		else
			musb_g_rx(musb, epnum);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(musb_interrupt);
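
/*
 * Sketch of the expected caller (hedged; each glue driver has its own ISR,
 * so this is not a verbatim copy of any of them): the platform interrupt
 * handler latches and acks the three interrupt status registers into
 * musb->int_* under musb->lock, and only then calls musb_interrupt():
 *
 *	static irqreturn_t example_musb_isr(int irq, void *__hci)
 *	{
 *		struct musb *musb = __hci;
 *		unsigned long flags;
 *		irqreturn_t ret = IRQ_NONE;
 *
 *		spin_lock_irqsave(&musb->lock, flags);
 *		musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
 *		musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
 *		musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
 *		if (musb->int_usb || musb->int_tx || musb->int_rx)
 *			ret = musb_interrupt(musb);
 *		spin_unlock_irqrestore(&musb->lock, flags);
 *
 *		return ret;
 *	}
 */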
#ifndef CONFIG_MUSB_PIO_ONLY
static bool use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");

void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	/* called with controller lock already held */

	if (!epnum) {
#ifndef CONFIG_USB_TUSB_OMAP_DMA
		if (!is_cppi_enabled()) {
			/* endpoint 0 */
			if (is_host_active(musb))
				musb_h_ep0_irq(musb);
			else
				musb_g_ep0_irq(musb);
		}
#endif
	} else {
		/* endpoints 1..15 */
		if (transmit) {
			if (is_host_active(musb))
				musb_host_tx(musb, epnum);
			else
				musb_g_tx(musb, epnum);
		} else {
			/* receive */
			if (is_host_active(musb))
				musb_host_rx(musb, epnum);
			else
				musb_g_rx(musb, epnum);
		}
	}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);

#else
#define use_dma			0
#endif
/*-------------------------------------------------------------------------*/

static ssize_t
musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&musb->lock, flags);
	ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}

static ssize_t
musb_mode_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	int		status;

	spin_lock_irqsave(&musb->lock, flags);
	if (sysfs_streq(buf, "host"))
		status = musb_platform_set_mode(musb, MUSB_HOST);
	else if (sysfs_streq(buf, "peripheral"))
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
	else if (sysfs_streq(buf, "otg"))
		status = musb_platform_set_mode(musb, MUSB_OTG);
	else
		status = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	return (status == 0) ? n : status;
}
static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
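
/*
 * Illustrative sysfs usage (the device name depends on the platform glue
 * and instance number, so treat the path as an example only):
 *
 *	# cat /sys/bus/platform/devices/musb-hdrc.0.auto/mode
 *	b_idle
 *	# echo host > /sys/bus/platform/devices/musb-hdrc.0.auto/mode
 */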
static ssize_t
musb_vbus_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;

	if (sscanf(buf, "%lu", &val) < 1) {
		dev_err(dev, "Invalid VBUS timeout ms value\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&musb->lock, flags);
	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
		musb->is_active = 0;
	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
	spin_unlock_irqrestore(&musb->lock, flags);

	return n;
}

static ssize_t
musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;
	int		vbus;

	spin_lock_irqsave(&musb->lock, flags);
	val = musb->a_wait_bcon;
	/* FIXME get_vbus_status() is normally #defined as false...
	 * and is effectively TUSB-specific.
	 */
	vbus = musb_platform_get_vbus_status(musb);
	spin_unlock_irqrestore(&musb->lock, flags);

	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
			vbus ? "on" : "off", val);
}
static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
/* Gadget drivers can't know that a host is connected so they might want
 * to start SRP, but users can.  This allows userspace to trigger SRP.
 */
static ssize_t
musb_srp_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned short	srp;

	if (sscanf(buf, "%hu", &srp) != 1
			|| (srp != 1)) {
		dev_err(dev, "SRP: Value must be 1\n");
		return -EINVAL;
	}

	if (srp == 1)
		musb_g_wakeup(musb);

	return n;
}
static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
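
/*
 * Illustrative usage of the two writable attributes above (paths are
 * examples, see the note at the "mode" attribute): writing a millisecond
 * value to "vbus" sets T(a_wait_bcon) and reading it reports VBUS state and
 * timeout, while writing "1" to "srp" makes a B-device start Session
 * Request Protocol signaling:
 *
 *	# echo 5000 > /sys/bus/platform/devices/musb-hdrc.0.auto/vbus
 *	# echo 1 > /sys/bus/platform/devices/musb-hdrc.0.auto/srp
 */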
static struct attribute *musb_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_vbus.attr,
	&dev_attr_srp.attr,
	NULL
};

static const struct attribute_group musb_attr_group = {
	.attrs = musb_attributes,
};
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
	struct musb *musb = container_of(data, struct musb, irq_work);

	if (musb->xceiv->otg->state != musb->xceiv_old_state) {
		musb->xceiv_old_state = musb->xceiv->otg->state;
		sysfs_notify(&musb->controller->kobj, NULL, "mode");
	}
}
static void musb_recover_from_babble(struct musb *musb)
{
	int ret;
	u8 devctl;

	musb_disable_interrupts(musb);

	/*
	 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
	 * it some slack and wait for 10us.
	 */
	udelay(10);

	ret = musb_platform_recover(musb);
	if (ret) {
		musb_enable_interrupts(musb);
		return;
	}

	/* drop session bit */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	/* tell usbcore about it */
	musb_root_disconnect(musb);

	/*
	 * When a babble condition occurs, the musb controller
	 * removes the session bit and the endpoint config is lost.
	 */
	if (musb->dyn_fifo)
		ret = ep_config_from_table(musb);
	else
		ret = ep_config_from_hw(musb);

	/* restart session */
	if (ret == 0)
		musb_start(musb);
}
/* --------------------------------------------------------------------------
 * Init support
 */

static struct musb *allocate_instance(struct device *dev,
		struct musb_hdrc_config *config, void __iomem *mbase)
{
	struct musb		*musb;
	struct musb_hw_ep	*ep;
	int			epnum;
	int			ret;

	musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
	if (!musb)
		return NULL;

	INIT_LIST_HEAD(&musb->control);
	INIT_LIST_HEAD(&musb->in_bulk);
	INIT_LIST_HEAD(&musb->out_bulk);

	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
	musb->mregs = mbase;
	musb->ctrl_base = mbase;
	musb->nIrq = -ENODEV;
	musb->config = config;
	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
	for (epnum = 0, ep = musb->endpoints;
			epnum < musb->config->num_eps;
			epnum++, ep++) {
		ep->musb = musb;
		ep->epnum = epnum;
	}

	musb->controller = dev;

	ret = musb_host_alloc(musb);
	if (ret < 0)
		goto err_free;

	dev_set_drvdata(dev, musb);

	return musb;

err_free:
	return NULL;
}
static void musb_free(struct musb *musb)
{
	/* this has multiple entry modes. it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything's been de-activated.
	 */

#ifdef CONFIG_SYSFS
	sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif

	if (musb->nIrq >= 0) {
		if (musb->irq_wake)
			disable_irq_wake(musb->nIrq);
		free_irq(musb->nIrq, musb);
	}

	musb_host_free(musb);
}
static void musb_deassert_reset(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, deassert_reset_work.work);

	spin_lock_irqsave(&musb->lock, flags);

	if (musb->port1_status & USB_PORT_STAT_RESET)
		musb_port_reset(musb, false);

	spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Perform generic per-controller initialization.
 *
 * @dev: the controller (already clocked, etc)
 * @nIrq: IRQ number
 * @ctrl: virtual address of controller registers,
 *	not yet corrected for platform-specific offsets
 */
static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
	int			status;
	struct musb		*musb;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);

	/* The driver might handle more features than the board; OK.
	 * Fail when the board needs a feature that's not enabled.
	 */
	if (!plat) {
		dev_dbg(dev, "no platform_data?\n");
		status = -ENODEV;
		goto fail0;
	}

	/* allocate */
	musb = allocate_instance(dev, plat->config, ctrl);
	if (!musb) {
		status = -ENOMEM;
		goto fail0;
	}

	spin_lock_init(&musb->lock);
	musb->board_set_power = plat->set_power;
	musb->min_power = plat->min_power;
	musb->ops = plat->platform_ops;
	musb->port_mode = plat->mode;

	/*
	 * Initialize the default IO functions. At least omap2430 needs
	 * these early. We initialize the platform specific IO functions
	 * later on.
	 */
	musb_readb = musb_default_readb;
	musb_writeb = musb_default_writeb;
	musb_readw = musb_default_readw;
	musb_writew = musb_default_writew;
	musb_readl = musb_default_readl;
	musb_writel = musb_default_writel;

	/* We need musb_read/write functions initialized for PM */
	pm_runtime_use_autosuspend(musb->controller);
	pm_runtime_set_autosuspend_delay(musb->controller, 200);
	pm_runtime_irq_safe(musb->controller);
	pm_runtime_enable(musb->controller);

	/* The musb_platform_init() call:
	 *   - adjusts musb->mregs
	 *   - sets the musb->isr
	 *   - may initialize an integrated transceiver
	 *   - initializes musb->xceiv, usually by otg_get_phy()
	 *   - stops powering VBUS
	 *
	 * There are various transceiver configurations.  Blackfin,
	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
	 * external/discrete ones in various flavors (twl4030 family,
	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
	 */
	status = musb_platform_init(musb);
	if (status < 0)
		goto fail1;

	if (!musb->isr) {
		status = -ENODEV;
		goto fail2;
	}
	if (musb->ops->quirks)
		musb->io.quirks = musb->ops->quirks;

	/* Most devices use indexed offset or flat offset */
	if (musb->io.quirks & MUSB_INDEXED_EP) {
		musb->io.ep_offset = musb_indexed_ep_offset;
		musb->io.ep_select = musb_indexed_ep_select;
	} else {
		musb->io.ep_offset = musb_flat_ep_offset;
		musb->io.ep_select = musb_flat_ep_select;
	}

	/* At least tusb6010 has its own offsets */
	if (musb->ops->ep_offset)
		musb->io.ep_offset = musb->ops->ep_offset;
	if (musb->ops->ep_select)
		musb->io.ep_select = musb->ops->ep_select;

	if (musb->ops->fifo_mode)
		fifo_mode = musb->ops->fifo_mode;
	else
		fifo_mode = 4;

	if (musb->ops->fifo_offset)
		musb->io.fifo_offset = musb->ops->fifo_offset;
	else
		musb->io.fifo_offset = musb_default_fifo_offset;

	if (musb->ops->readb)
		musb_readb = musb->ops->readb;
	if (musb->ops->writeb)
		musb_writeb = musb->ops->writeb;
	if (musb->ops->readw)
		musb_readw = musb->ops->readw;
	if (musb->ops->writew)
		musb_writew = musb->ops->writew;
	if (musb->ops->readl)
		musb_readl = musb->ops->readl;
	if (musb->ops->writel)
		musb_writel = musb->ops->writel;

	if (musb->ops->read_fifo)
		musb->io.read_fifo = musb->ops->read_fifo;
	else
		musb->io.read_fifo = musb_default_read_fifo;

	if (musb->ops->write_fifo)
		musb->io.write_fifo = musb->ops->write_fifo;
	else
		musb->io.write_fifo = musb_default_write_fifo;

	if (!musb->xceiv->io_ops) {
		musb->xceiv->io_dev = musb->controller;
		musb->xceiv->io_priv = musb->mregs;
		musb->xceiv->io_ops = &musb_ulpi_access;
	}

	pm_runtime_get_sync(musb->controller);
	if (use_dma && dev->dma_mask) {
		musb->dma_controller = dma_controller_create(musb, musb->mregs);
		if (IS_ERR(musb->dma_controller)) {
			status = PTR_ERR(musb->dma_controller);
			goto fail2_5;
		}
	}
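
	/*
	 * If DMA is disabled (use_dma module parameter) or the device has
	 * no DMA mask, musb->dma_controller stays NULL and all transfers
	 * fall back to PIO through the FIFO read/write helpers above.
	 */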
	/* be sure interrupts are disabled before connecting ISR */
	musb_platform_disable(musb);
	musb_generic_disable(musb);

	/* Init IRQ workqueue before request_irq */
	INIT_WORK(&musb->irq_work, musb_irq_work);
	INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
	INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
	/* setup musb parts of the core (especially endpoints) */
	status = musb_core_init(plat->config->multipoint
			? MUSB_CONTROLLER_MHDRC
			: MUSB_CONTROLLER_HDRC, musb);
	if (status < 0)
		goto fail3;

	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
	/* attach to the IRQ */
	if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
		dev_err(dev, "request_irq %d failed!\n", nIrq);
		status = -ENODEV;
		goto fail3;
	}
	/* FIXME this handles wakeup irqs wrong */
	if (enable_irq_wake(nIrq) == 0) {
		musb->irq_wake = 1;
		device_init_wakeup(dev, 1);
	} else {
		musb->irq_wake = 0;
	}
	/* program PHY to use external vBus if required */
	if (plat->extvbus) {
		u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
		busctl |= MUSB_ULPI_USE_EXTVBUS;
		musb_write_ulpi_buscontrol(musb->mregs, busctl);
	}
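
	/*
	 * MUSB_ULPI_USE_EXTVBUS is a bit in the ULPI BUSCONTROL register
	 * that tells the PHY to rely on an external VBUS supply instead
	 * of the transceiver's internal one; boards request this through
	 * plat->extvbus.
	 */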
	if (musb->xceiv->otg->default_a) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	} else {
		MUSB_DEV_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	}
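
	/*
	 * The initial role follows the transceiver's report of the ID pin:
	 * default_a means we are the A-device (host side of the cable),
	 * otherwise we start out as a B-device (peripheral) until the OTG
	 * state machine says otherwise.
	 */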
	switch (musb->port_mode) {
	case MUSB_PORT_MODE_HOST:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_HOST);
		break;
	case MUSB_PORT_MODE_GADGET:
		status = musb_gadget_setup(musb);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
		break;
	case MUSB_PORT_MODE_DUAL_ROLE:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_gadget_setup(musb);
		if (status) {
			musb_host_cleanup(musb);
			goto fail3;
		}
		status = musb_platform_set_mode(musb, MUSB_OTG);
		break;
	default:
		dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
		break;
	}

	if (status < 0)
		goto fail3;
	status = musb_init_debugfs(musb);
	if (status < 0)
		goto fail4;

	status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
	if (status)
		goto fail5;

	pm_runtime_put(musb->controller);

	return 0;
fail5:
	musb_exit_debugfs(musb);

fail4:
	musb_gadget_cleanup(musb);
	musb_host_cleanup(musb);

fail3:
	cancel_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	if (musb->dma_controller)
		dma_controller_destroy(musb->dma_controller);

fail2_5:
	pm_runtime_put_sync(musb->controller);

fail2:
	device_init_wakeup(dev, 0);
	musb_platform_exit(musb);

fail1:
	pm_runtime_disable(musb->controller);
	dev_err(musb->controller,
		"musb_init_controller failed with status %d\n", status);

	musb_free(musb);

fail0:
	return status;
}

/*-------------------------------------------------------------------------*/

/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
 * bridge to a platform device; this driver then suffices.
 */
static int musb_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq = platform_get_irq_byname(pdev, "mc");
	struct resource *iomem;
	void __iomem *base;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return musb_init_controller(dev, irq, base);
}
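
/*
 * Illustrative sketch only (not code from this driver): a bus glue layer
 * typically creates the "musb-hdrc" child platform device in its own probe,
 * handing over the core register region, the "mc" interrupt, and the
 * musb_hdrc_platform_data that musb_probe()/musb_init_controller() consume
 * above.  Roughly, with placeholder resource/pdata names:
 *
 *	struct platform_device *musb;
 *
 *	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
 *	platform_device_add_resources(musb, resources, ARRAY_SIZE(resources));
 *	platform_device_add_data(musb, &pdata, sizeof(pdata));
 *	platform_device_add(musb);
 */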

static int musb_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct musb *musb = dev_to_musb(dev);

	/* this gets called on rmmod.
	 *  - Host mode: host may still be active
	 *  - Peripheral mode: peripheral is deactivated (or never-activated)
	 *  - OTG mode: both roles are deactivated (or never-activated)
	 */
	musb_exit_debugfs(musb);
	musb_shutdown(pdev);

	if (musb->dma_controller)
		dma_controller_destroy(musb->dma_controller);

	cancel_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	device_init_wakeup(dev, 0);

	return 0;
}

static void musb_save_context(struct musb *musb)
{
	int i;
	void __iomem *musb_base = musb->mregs;
	void __iomem *epio;

	musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
	musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
	musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
	musb->context.power = musb_readb(musb_base, MUSB_POWER);
	musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
	musb->context.index = musb_readb(musb_base, MUSB_INDEX);
	musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep *hw_ep;

		hw_ep = &musb->endpoints[i];
		epio = hw_ep->regs;
		if (!epio)
			continue;

		musb_writeb(musb_base, MUSB_INDEX, i);
		musb->context.index_regs[i].txmaxp =
			musb_readw(epio, MUSB_TXMAXP);
		musb->context.index_regs[i].txcsr =
			musb_readw(epio, MUSB_TXCSR);
		musb->context.index_regs[i].rxmaxp =
			musb_readw(epio, MUSB_RXMAXP);
		musb->context.index_regs[i].rxcsr =
			musb_readw(epio, MUSB_RXCSR);

		if (musb->dyn_fifo) {
			musb->context.index_regs[i].txfifoadd =
				musb_read_txfifoadd(musb_base);
			musb->context.index_regs[i].rxfifoadd =
				musb_read_rxfifoadd(musb_base);
			musb->context.index_regs[i].txfifosz =
				musb_read_txfifosz(musb_base);
			musb->context.index_regs[i].rxfifosz =
				musb_read_rxfifosz(musb_base);
		}

		musb->context.index_regs[i].txtype =
			musb_readb(epio, MUSB_TXTYPE);
		musb->context.index_regs[i].txinterval =
			musb_readb(epio, MUSB_TXINTERVAL);
		musb->context.index_regs[i].rxtype =
			musb_readb(epio, MUSB_RXTYPE);
		musb->context.index_regs[i].rxinterval =
			musb_readb(epio, MUSB_RXINTERVAL);

		musb->context.index_regs[i].txfunaddr =
			musb_read_txfunaddr(musb_base, i);
		musb->context.index_regs[i].txhubaddr =
			musb_read_txhubaddr(musb_base, i);
		musb->context.index_regs[i].txhubport =
			musb_read_txhubport(musb_base, i);

		musb->context.index_regs[i].rxfunaddr =
			musb_read_rxfunaddr(musb_base, i);
		musb->context.index_regs[i].rxhubaddr =
			musb_read_rxhubaddr(musb_base, i);
		musb->context.index_regs[i].rxhubport =
			musb_read_rxhubport(musb_base, i);
	}
}
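
/*
 * The context saved above mirrors struct musb_context_registers: the common
 * registers plus one banked snapshot per hardware endpoint, taken by cycling
 * MUSB_INDEX through each endpoint.  musb_restore_context() below replays it
 * after the controller may have lost state across (runtime) suspend.
 */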

static void musb_restore_context(struct musb *musb)
{
	int i;
	u8 power;
	void __iomem *musb_base = musb->mregs;
	void __iomem *epio;
	void __iomem *ep_target_regs;

	musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
	musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
	musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);

	/* Don't affect SUSPENDM/RESUME bits in POWER reg */
	power = musb_readb(musb_base, MUSB_POWER);
	power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
	musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
	power |= musb->context.power;
	musb_writeb(musb_base, MUSB_POWER, power);

	musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
	musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
	musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
	musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep *hw_ep;

		hw_ep = &musb->endpoints[i];
		epio = hw_ep->regs;
		if (!epio)
			continue;

		musb_writeb(musb_base, MUSB_INDEX, i);
		musb_writew(epio, MUSB_TXMAXP,
			musb->context.index_regs[i].txmaxp);
		musb_writew(epio, MUSB_TXCSR,
			musb->context.index_regs[i].txcsr);
		musb_writew(epio, MUSB_RXMAXP,
			musb->context.index_regs[i].rxmaxp);
		musb_writew(epio, MUSB_RXCSR,
			musb->context.index_regs[i].rxcsr);

		if (musb->dyn_fifo) {
			musb_write_txfifosz(musb_base,
				musb->context.index_regs[i].txfifosz);
			musb_write_rxfifosz(musb_base,
				musb->context.index_regs[i].rxfifosz);
			musb_write_txfifoadd(musb_base,
				musb->context.index_regs[i].txfifoadd);
			musb_write_rxfifoadd(musb_base,
				musb->context.index_regs[i].rxfifoadd);
		}

		musb_writeb(epio, MUSB_TXTYPE,
			musb->context.index_regs[i].txtype);
		musb_writeb(epio, MUSB_TXINTERVAL,
			musb->context.index_regs[i].txinterval);
		musb_writeb(epio, MUSB_RXTYPE,
			musb->context.index_regs[i].rxtype);
		musb_writeb(epio, MUSB_RXINTERVAL,
			musb->context.index_regs[i].rxinterval);
		musb_write_txfunaddr(musb_base, i,
			musb->context.index_regs[i].txfunaddr);
		musb_write_txhubaddr(musb_base, i,
			musb->context.index_regs[i].txhubaddr);
		musb_write_txhubport(musb_base, i,
			musb->context.index_regs[i].txhubport);

		ep_target_regs =
			musb_read_target_reg_base(i, musb_base);

		musb_write_rxfunaddr(ep_target_regs,
			musb->context.index_regs[i].rxfunaddr);
		musb_write_rxhubaddr(ep_target_regs,
			musb->context.index_regs[i].rxhubaddr);
		musb_write_rxhubport(ep_target_regs,
			musb->context.index_regs[i].rxhubport);
	}
	musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
}
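
/*
 * Note that the restore loop leaves MUSB_INDEX pointing at the last endpoint,
 * so the saved index value is written back at the very end: whatever register
 * bank was selected before the context was saved is selected again afterwards.
 */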

static int musb_suspend(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);

	if (is_peripheral_active(musb)) {
		/* FIXME force disconnect unless we know USB will wake
		 * the system up quickly enough to respond ...
		 */
	} else if (is_host_active(musb)) {
		/* we know all the children are suspended; sometimes
		 * they will even be wakeup-enabled.
		 */
	}

	musb_save_context(musb);

	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
}

static int musb_resume(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	u8 devctl;
	u8 mask;

	/*
	 * For static cmos like DaVinci, register values were preserved
	 * unless for some reason the whole soc powered down or the USB
	 * module got reset through the PSC (vs just being disabled).
	 *
	 * For the DSPS glue layer though, a full register restore has to
	 * be done. As it shouldn't harm other platforms, we do it
	 * unconditionally.
	 */
	musb_restore_context(musb);

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
	if ((devctl & mask) != (musb->context.devctl & mask))
		musb->port1_status = 0;

	if (musb->need_finish_resume) {
		musb->need_finish_resume = 0;
		schedule_delayed_work(&musb->finish_resume_work,
				msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}

	/*
	 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
	 * out of suspend, so update the runtime PM state accordingly.
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}

static int musb_runtime_suspend(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);

	musb_save_context(musb);

	return 0;
}

static int musb_runtime_resume(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	static int first = 1;

	/*
	 * When pm_runtime_get_sync called for the first time in driver
	 * init, some of the structure is still not initialized which is
	 * used in restore function. But clock needs to be
	 * enabled before any register access, so
	 * pm_runtime_get_sync has to be called.
	 * Also context restore without save does not make
	 * sense.
	 */
	if (!first)
		musb_restore_context(musb);
	first = 0;

	if (musb->need_finish_resume) {
		musb->need_finish_resume = 0;
		schedule_delayed_work(&musb->finish_resume_work,
				msecs_to_jiffies(USB_RESUME_TIMEOUT));
	}

	return 0;
}

static const struct dev_pm_ops musb_dev_pm_ops = {
	.suspend	= musb_suspend,
	.resume		= musb_resume,
	.runtime_suspend = musb_runtime_suspend,
	.runtime_resume	= musb_runtime_resume,
};

#ifdef CONFIG_PM
#define MUSB_DEV_PM_OPS	(&musb_dev_pm_ops)
#else
#define MUSB_DEV_PM_OPS	NULL
#endif

static struct platform_driver musb_driver = {
	.driver = {
		.name		= (char *)musb_driver_name,
		.bus		= &platform_bus_type,
		.pm		= MUSB_DEV_PM_OPS,
	},
	.probe		= musb_probe,
	.remove		= musb_remove,
	.shutdown	= musb_shutdown,
};

module_platform_driver(musb_driver);
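
/*
 * For reference: module_platform_driver() above expands to the usual
 * module_init()/module_exit() pair that registers and unregisters
 * musb_driver, roughly along these lines (generated names shown only
 * for illustration):
 *
 *	static int __init musb_driver_init(void)
 *	{
 *		return platform_driver_register(&musb_driver);
 *	}
 *	module_init(musb_driver_init);
 *
 * with the matching platform_driver_unregister() call on module exit.
 */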