1 // SPDX-License-Identifier: GPL-2.0
3 * MUSB OTG driver core code
5 * Copyright 2005 Mentor Graphics Corporation
6 * Copyright (C) 2005-2006 by Texas Instruments
7 * Copyright (C) 2006-2007 Nokia Corporation
11 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
13 * This consists of a Host Controller Driver (HCD) and a peripheral
14 * controller driver implementing the "Gadget" API; OTG support is
15 * in the works. These are normal Linux-USB controller drivers which
16 * use IRQs and have no dedicated thread.
18 * This version of the driver has only been used with products from
19 * Texas Instruments. Those products integrate the Inventra logic
20 * with other DMA, IRQ, and bus modules, as well as other logic that
21 * needs to be reflected in this driver.
24 * NOTE: the original Mentor code here was pretty much a collection
25 * of mechanisms that don't seem to have been fully integrated/working
26 * for *any* Linux kernel version. This version aims at Linux 2.6.now,
27 * Key open issues include:
29 * - Lack of host-side transaction scheduling, for all transfer types.
30 * The hardware doesn't do it; instead, software must.
32 * This is not an issue for OTG devices that don't support external
33 * hubs, but for more "normal" USB hosts it's a user issue that the
34 * "multipoint" support doesn't scale in the expected ways. That
35 * includes DaVinci EVM in a common non-OTG mode.
37 * * Control and bulk use dedicated endpoints, and there's as
38 * yet no mechanism to either (a) reclaim the hardware when
39 * peripherals are NAKing, which gets complicated with bulk
40 * endpoints, or (b) use more than a single bulk endpoint in
43 * RESULT: one device may be perceived as blocking another one.
45 * * Interrupt and isochronous will dynamically allocate endpoint
46 * hardware, but (a) there's no record keeping for bandwidth;
47 * (b) in the common case that few endpoints are available, there
48 * is no mechanism to reuse endpoints to talk to multiple devices.
50 * RESULT: At one extreme, bandwidth can be overcommitted in
51 * some hardware configurations, no faults will be reported.
52 * At the other extreme, the bandwidth capabilities which do
53 * exist tend to be severely undercommitted. You can't yet hook
54 * up both a keyboard and a mouse to an external USB hub.
58 * This gets many kinds of configuration information:
59 * - Kconfig for everything user-configurable
60 * - platform_device for addressing, irq, and platform_data
61 * - platform_data is mostly for board-specific information
62 * (plus recently, SOC or family details)
64 * Most of the conditional compilation will (someday) vanish.
67 #include <linux/module.h>
68 #include <linux/kernel.h>
69 #include <linux/sched.h>
70 #include <linux/slab.h>
71 #include <linux/list.h>
72 #include <linux/kobject.h>
73 #include <linux/prefetch.h>
74 #include <linux/platform_device.h>
76 #include <linux/iopoll.h>
77 #include <linux/dma-mapping.h>
78 #include <linux/usb.h>
79 #include <linux/usb/of.h>
81 #include "musb_core.h"
82 #include "musb_trace.h"
84 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
87 #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
88 #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
90 #define MUSB_VERSION "6.0"
92 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
94 #define MUSB_DRIVER_NAME "musb-hdrc"
95 const char musb_driver_name
[] = MUSB_DRIVER_NAME
;
97 MODULE_DESCRIPTION(DRIVER_INFO
);
98 MODULE_AUTHOR(DRIVER_AUTHOR
);
99 MODULE_LICENSE("GPL");
100 MODULE_ALIAS("platform:" MUSB_DRIVER_NAME
);
103 /*-------------------------------------------------------------------------*/
/* Fetch the struct musb hung off the controller's device drvdata. */
static inline struct musb *dev_to_musb(struct device *dev)
{
	return dev_get_drvdata(dev);
}
110 enum musb_mode
musb_get_mode(struct device
*dev
)
112 enum usb_dr_mode mode
;
114 mode
= usb_get_dr_mode(dev
);
116 case USB_DR_MODE_HOST
:
118 case USB_DR_MODE_PERIPHERAL
:
119 return MUSB_PERIPHERAL
;
120 case USB_DR_MODE_OTG
:
121 case USB_DR_MODE_UNKNOWN
:
126 EXPORT_SYMBOL_GPL(musb_get_mode
);
128 /*-------------------------------------------------------------------------*/
130 static int musb_ulpi_read(struct usb_phy
*phy
, u32 reg
)
132 void __iomem
*addr
= phy
->io_priv
;
138 pm_runtime_get_sync(phy
->io_dev
);
140 /* Make sure the transceiver is not in low power mode */
141 power
= musb_readb(addr
, MUSB_POWER
);
142 power
&= ~MUSB_POWER_SUSPENDM
;
143 musb_writeb(addr
, MUSB_POWER
, power
);
145 /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
146 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
149 musb_writeb(addr
, MUSB_ULPI_REG_ADDR
, (u8
)reg
);
150 musb_writeb(addr
, MUSB_ULPI_REG_CONTROL
,
151 MUSB_ULPI_REG_REQ
| MUSB_ULPI_RDN_WR
);
153 while (!(musb_readb(addr
, MUSB_ULPI_REG_CONTROL
)
154 & MUSB_ULPI_REG_CMPLT
)) {
162 r
= musb_readb(addr
, MUSB_ULPI_REG_CONTROL
);
163 r
&= ~MUSB_ULPI_REG_CMPLT
;
164 musb_writeb(addr
, MUSB_ULPI_REG_CONTROL
, r
);
166 ret
= musb_readb(addr
, MUSB_ULPI_REG_DATA
);
169 pm_runtime_put(phy
->io_dev
);
174 static int musb_ulpi_write(struct usb_phy
*phy
, u32 val
, u32 reg
)
176 void __iomem
*addr
= phy
->io_priv
;
182 pm_runtime_get_sync(phy
->io_dev
);
184 /* Make sure the transceiver is not in low power mode */
185 power
= musb_readb(addr
, MUSB_POWER
);
186 power
&= ~MUSB_POWER_SUSPENDM
;
187 musb_writeb(addr
, MUSB_POWER
, power
);
189 musb_writeb(addr
, MUSB_ULPI_REG_ADDR
, (u8
)reg
);
190 musb_writeb(addr
, MUSB_ULPI_REG_DATA
, (u8
)val
);
191 musb_writeb(addr
, MUSB_ULPI_REG_CONTROL
, MUSB_ULPI_REG_REQ
);
193 while (!(musb_readb(addr
, MUSB_ULPI_REG_CONTROL
)
194 & MUSB_ULPI_REG_CMPLT
)) {
202 r
= musb_readb(addr
, MUSB_ULPI_REG_CONTROL
);
203 r
&= ~MUSB_ULPI_REG_CMPLT
;
204 musb_writeb(addr
, MUSB_ULPI_REG_CONTROL
, r
);
207 pm_runtime_put(phy
->io_dev
);
212 static struct usb_phy_io_ops musb_ulpi_access
= {
213 .read
= musb_ulpi_read
,
214 .write
= musb_ulpi_write
,
217 /*-------------------------------------------------------------------------*/
219 static u32
musb_default_fifo_offset(u8 epnum
)
221 return 0x20 + (epnum
* 4);
224 /* "flat" mapping: each endpoint has its own i/o address */
225 static void musb_flat_ep_select(void __iomem
*mbase
, u8 epnum
)
229 static u32
musb_flat_ep_offset(u8 epnum
, u16 offset
)
231 return 0x100 + (0x10 * epnum
) + offset
;
234 /* "indexed" mapping: INDEX register controls register bank select */
235 static void musb_indexed_ep_select(void __iomem
*mbase
, u8 epnum
)
237 musb_writeb(mbase
, MUSB_INDEX
, epnum
);
240 static u32
musb_indexed_ep_offset(u8 epnum
, u16 offset
)
242 return 0x10 + offset
;
245 static u32
musb_default_busctl_offset(u8 epnum
, u16 offset
)
247 return 0x80 + (0x08 * epnum
) + offset
;
250 static u8
musb_default_readb(void __iomem
*addr
, u32 offset
)
252 u8 data
= __raw_readb(addr
+ offset
);
254 trace_musb_readb(__builtin_return_address(0), addr
, offset
, data
);
258 static void musb_default_writeb(void __iomem
*addr
, u32 offset
, u8 data
)
260 trace_musb_writeb(__builtin_return_address(0), addr
, offset
, data
);
261 __raw_writeb(data
, addr
+ offset
);
264 static u16
musb_default_readw(void __iomem
*addr
, u32 offset
)
266 u16 data
= __raw_readw(addr
+ offset
);
268 trace_musb_readw(__builtin_return_address(0), addr
, offset
, data
);
272 static void musb_default_writew(void __iomem
*addr
, u32 offset
, u16 data
)
274 trace_musb_writew(__builtin_return_address(0), addr
, offset
, data
);
275 __raw_writew(data
, addr
+ offset
);
278 static u16
musb_default_get_toggle(struct musb_qh
*qh
, int is_out
)
280 void __iomem
*epio
= qh
->hw_ep
->regs
;
284 csr
= musb_readw(epio
, MUSB_TXCSR
) & MUSB_TXCSR_H_DATATOGGLE
;
286 csr
= musb_readw(epio
, MUSB_RXCSR
) & MUSB_RXCSR_H_DATATOGGLE
;
291 static u16
musb_default_set_toggle(struct musb_qh
*qh
, int is_out
,
297 toggle
= usb_gettoggle(urb
->dev
, qh
->epnum
, is_out
);
300 csr
= toggle
? (MUSB_TXCSR_H_WR_DATATOGGLE
301 | MUSB_TXCSR_H_DATATOGGLE
)
302 : MUSB_TXCSR_CLRDATATOG
;
304 csr
= toggle
? (MUSB_RXCSR_H_WR_DATATOGGLE
305 | MUSB_RXCSR_H_DATATOGGLE
) : 0;
311 * Load an endpoint's FIFO
313 static void musb_default_write_fifo(struct musb_hw_ep
*hw_ep
, u16 len
,
316 struct musb
*musb
= hw_ep
->musb
;
317 void __iomem
*fifo
= hw_ep
->fifo
;
319 if (unlikely(len
== 0))
324 dev_dbg(musb
->controller
, "%cX ep%d fifo %p count %d buf %p\n",
325 'T', hw_ep
->epnum
, fifo
, len
, src
);
327 /* we can't assume unaligned reads work */
328 if (likely((0x01 & (unsigned long) src
) == 0)) {
331 /* best case is 32bit-aligned source address */
332 if ((0x02 & (unsigned long) src
) == 0) {
334 iowrite32_rep(fifo
, src
+ index
, len
>> 2);
335 index
+= len
& ~0x03;
338 __raw_writew(*(u16
*)&src
[index
], fifo
);
343 iowrite16_rep(fifo
, src
+ index
, len
>> 1);
344 index
+= len
& ~0x01;
348 __raw_writeb(src
[index
], fifo
);
351 iowrite8_rep(fifo
, src
, len
);
356 * Unload an endpoint's FIFO
358 static void musb_default_read_fifo(struct musb_hw_ep
*hw_ep
, u16 len
, u8
*dst
)
360 struct musb
*musb
= hw_ep
->musb
;
361 void __iomem
*fifo
= hw_ep
->fifo
;
363 if (unlikely(len
== 0))
366 dev_dbg(musb
->controller
, "%cX ep%d fifo %p count %d buf %p\n",
367 'R', hw_ep
->epnum
, fifo
, len
, dst
);
369 /* we can't assume unaligned writes work */
370 if (likely((0x01 & (unsigned long) dst
) == 0)) {
373 /* best case is 32bit-aligned destination address */
374 if ((0x02 & (unsigned long) dst
) == 0) {
376 ioread32_rep(fifo
, dst
, len
>> 2);
380 *(u16
*)&dst
[index
] = __raw_readw(fifo
);
385 ioread16_rep(fifo
, dst
, len
>> 1);
390 dst
[index
] = __raw_readb(fifo
);
393 ioread8_rep(fifo
, dst
, len
);
398 * Old style IO functions
400 u8 (*musb_readb
)(void __iomem
*addr
, u32 offset
);
401 EXPORT_SYMBOL_GPL(musb_readb
);
403 void (*musb_writeb
)(void __iomem
*addr
, u32 offset
, u8 data
);
404 EXPORT_SYMBOL_GPL(musb_writeb
);
406 u8 (*musb_clearb
)(void __iomem
*addr
, u32 offset
);
407 EXPORT_SYMBOL_GPL(musb_clearb
);
409 u16 (*musb_readw
)(void __iomem
*addr
, u32 offset
);
410 EXPORT_SYMBOL_GPL(musb_readw
);
412 void (*musb_writew
)(void __iomem
*addr
, u32 offset
, u16 data
);
413 EXPORT_SYMBOL_GPL(musb_writew
);
415 u16 (*musb_clearw
)(void __iomem
*addr
, u32 offset
);
416 EXPORT_SYMBOL_GPL(musb_clearw
);
418 u32
musb_readl(void __iomem
*addr
, u32 offset
)
420 u32 data
= __raw_readl(addr
+ offset
);
422 trace_musb_readl(__builtin_return_address(0), addr
, offset
, data
);
425 EXPORT_SYMBOL_GPL(musb_readl
);
427 void musb_writel(void __iomem
*addr
, u32 offset
, u32 data
)
429 trace_musb_writel(__builtin_return_address(0), addr
, offset
, data
);
430 __raw_writel(data
, addr
+ offset
);
432 EXPORT_SYMBOL_GPL(musb_writel
);
434 #ifndef CONFIG_MUSB_PIO_ONLY
435 struct dma_controller
*
436 (*musb_dma_controller_create
)(struct musb
*musb
, void __iomem
*base
);
437 EXPORT_SYMBOL(musb_dma_controller_create
);
439 void (*musb_dma_controller_destroy
)(struct dma_controller
*c
);
440 EXPORT_SYMBOL(musb_dma_controller_destroy
);
444 * New style IO functions
446 void musb_read_fifo(struct musb_hw_ep
*hw_ep
, u16 len
, u8
*dst
)
448 return hw_ep
->musb
->io
.read_fifo(hw_ep
, len
, dst
);
451 void musb_write_fifo(struct musb_hw_ep
*hw_ep
, u16 len
, const u8
*src
)
453 return hw_ep
->musb
->io
.write_fifo(hw_ep
, len
, src
);
456 static u8
musb_read_devctl(struct musb
*musb
)
458 return musb_readb(musb
->mregs
, MUSB_DEVCTL
);
462 * musb_set_host - set and initialize host mode
463 * @musb: musb controller driver data
465 * At least some musb revisions need to enable devctl session bit in
466 * peripheral mode to switch to host mode. Initializes things to host
467 * mode and sets A_IDLE. SoC glue needs to advance state further
468 * based on phy provided VBUS state.
470 * Note that the SoC glue code may need to wait for musb to settle
471 * on enable before calling this to avoid babble.
473 int musb_set_host(struct musb
*musb
)
481 devctl
= musb_read_devctl(musb
);
482 if (!(devctl
& MUSB_DEVCTL_BDEVICE
)) {
483 dev_info(musb
->controller
,
484 "%s: already in host mode: %02x\n",
489 devctl
|= MUSB_DEVCTL_SESSION
;
490 musb_writeb(musb
->mregs
, MUSB_DEVCTL
, devctl
);
492 error
= readx_poll_timeout(musb_read_devctl
, musb
, devctl
,
493 !(devctl
& MUSB_DEVCTL_BDEVICE
), 5000,
496 dev_err(musb
->controller
, "%s: could not set host: %02x\n",
504 musb
->xceiv
->otg
->state
= OTG_STATE_A_IDLE
;
509 EXPORT_SYMBOL_GPL(musb_set_host
);
512 * musb_set_peripheral - set and initialize peripheral mode
513 * @musb: musb controller driver data
515 * Clears devctl session bit and initializes things for peripheral
516 * mode and sets B_IDLE. SoC glue needs to advance state further
517 * based on phy provided VBUS state.
519 int musb_set_peripheral(struct musb
*musb
)
527 devctl
= musb_read_devctl(musb
);
528 if (devctl
& MUSB_DEVCTL_BDEVICE
) {
529 dev_info(musb
->controller
,
530 "%s: already in peripheral mode: %02x\n",
536 devctl
&= ~MUSB_DEVCTL_SESSION
;
537 musb_writeb(musb
->mregs
, MUSB_DEVCTL
, devctl
);
539 error
= readx_poll_timeout(musb_read_devctl
, musb
, devctl
,
540 devctl
& MUSB_DEVCTL_BDEVICE
, 5000,
543 dev_err(musb
->controller
, "%s: could not set peripheral: %02x\n",
551 musb
->xceiv
->otg
->state
= OTG_STATE_B_IDLE
;
556 EXPORT_SYMBOL_GPL(musb_set_peripheral
);
558 /*-------------------------------------------------------------------------*/
560 /* for high speed test mode; see USB 2.0 spec 7.1.20 */
561 static const u8 musb_test_packet
[53] = {
562 /* implicit SYNC then DATA0 to start */
565 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
567 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
569 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
570 /* JJJJJJJKKKKKKK x8 */
571 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
573 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
574 /* JKKKKKKK x10, JK */
575 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
577 /* implicit CRC16 then EOP to end */
580 void musb_load_testpacket(struct musb
*musb
)
582 void __iomem
*regs
= musb
->endpoints
[0].regs
;
584 musb_ep_select(musb
->mregs
, 0);
585 musb_write_fifo(musb
->control_ep
,
586 sizeof(musb_test_packet
), musb_test_packet
);
587 musb_writew(regs
, MUSB_CSR0
, MUSB_CSR0_TXPKTRDY
);
590 /*-------------------------------------------------------------------------*/
593 * Handles OTG hnp timeouts, such as b_ase0_brst
595 static void musb_otg_timer_func(struct timer_list
*t
)
597 struct musb
*musb
= from_timer(musb
, t
, otg_timer
);
600 spin_lock_irqsave(&musb
->lock
, flags
);
601 switch (musb
->xceiv
->otg
->state
) {
602 case OTG_STATE_B_WAIT_ACON
:
604 "HNP: b_wait_acon timeout; back to b_peripheral");
605 musb_g_disconnect(musb
);
606 musb
->xceiv
->otg
->state
= OTG_STATE_B_PERIPHERAL
;
609 case OTG_STATE_A_SUSPEND
:
610 case OTG_STATE_A_WAIT_BCON
:
611 musb_dbg(musb
, "HNP: %s timeout",
612 usb_otg_state_string(musb
->xceiv
->otg
->state
));
613 musb_platform_set_vbus(musb
, 0);
614 musb
->xceiv
->otg
->state
= OTG_STATE_A_WAIT_VFALL
;
617 musb_dbg(musb
, "HNP: Unhandled mode %s",
618 usb_otg_state_string(musb
->xceiv
->otg
->state
));
620 spin_unlock_irqrestore(&musb
->lock
, flags
);
624 * Stops the HNP transition. Caller must take care of locking.
626 void musb_hnp_stop(struct musb
*musb
)
628 struct usb_hcd
*hcd
= musb
->hcd
;
629 void __iomem
*mbase
= musb
->mregs
;
632 musb_dbg(musb
, "HNP: stop from %s",
633 usb_otg_state_string(musb
->xceiv
->otg
->state
));
635 switch (musb
->xceiv
->otg
->state
) {
636 case OTG_STATE_A_PERIPHERAL
:
637 musb_g_disconnect(musb
);
638 musb_dbg(musb
, "HNP: back to %s",
639 usb_otg_state_string(musb
->xceiv
->otg
->state
));
641 case OTG_STATE_B_HOST
:
642 musb_dbg(musb
, "HNP: Disabling HR");
644 hcd
->self
.is_b_host
= 0;
645 musb
->xceiv
->otg
->state
= OTG_STATE_B_PERIPHERAL
;
647 reg
= musb_readb(mbase
, MUSB_POWER
);
648 reg
|= MUSB_POWER_SUSPENDM
;
649 musb_writeb(mbase
, MUSB_POWER
, reg
);
650 /* REVISIT: Start SESSION_REQUEST here? */
653 musb_dbg(musb
, "HNP: Stopping in unknown state %s",
654 usb_otg_state_string(musb
->xceiv
->otg
->state
));
658 * When returning to A state after HNP, avoid hub_port_rebounce(),
659 * which cause occasional OPT A "Did not receive reset after connect"
662 musb
->port1_status
&= ~(USB_PORT_STAT_C_CONNECTION
<< 16);
/* Forward declaration: babble recovery is defined later in this file. */
static void musb_recover_from_babble(struct musb *musb);
667 static void musb_handle_intr_resume(struct musb
*musb
, u8 devctl
)
669 musb_dbg(musb
, "RESUME (%s)",
670 usb_otg_state_string(musb
->xceiv
->otg
->state
));
672 if (devctl
& MUSB_DEVCTL_HM
) {
673 switch (musb
->xceiv
->otg
->state
) {
674 case OTG_STATE_A_SUSPEND
:
676 musb
->port1_status
|=
677 (USB_PORT_STAT_C_SUSPEND
<< 16)
678 | MUSB_PORT_STAT_RESUME
;
679 musb
->rh_timer
= jiffies
680 + msecs_to_jiffies(USB_RESUME_TIMEOUT
);
681 musb
->xceiv
->otg
->state
= OTG_STATE_A_HOST
;
683 musb_host_resume_root_hub(musb
);
684 schedule_delayed_work(&musb
->finish_resume_work
,
685 msecs_to_jiffies(USB_RESUME_TIMEOUT
));
687 case OTG_STATE_B_WAIT_ACON
:
688 musb
->xceiv
->otg
->state
= OTG_STATE_B_PERIPHERAL
;
693 WARNING("bogus %s RESUME (%s)\n",
695 usb_otg_state_string(musb
->xceiv
->otg
->state
));
698 switch (musb
->xceiv
->otg
->state
) {
699 case OTG_STATE_A_SUSPEND
:
700 /* possibly DISCONNECT is upcoming */
701 musb
->xceiv
->otg
->state
= OTG_STATE_A_HOST
;
702 musb_host_resume_root_hub(musb
);
704 case OTG_STATE_B_WAIT_ACON
:
705 case OTG_STATE_B_PERIPHERAL
:
706 /* disconnect while suspended? we may
707 * not get a disconnect irq...
709 if ((devctl
& MUSB_DEVCTL_VBUS
)
710 != (3 << MUSB_DEVCTL_VBUS_SHIFT
)
712 musb
->int_usb
|= MUSB_INTR_DISCONNECT
;
713 musb
->int_usb
&= ~MUSB_INTR_SUSPEND
;
718 case OTG_STATE_B_IDLE
:
719 musb
->int_usb
&= ~MUSB_INTR_SUSPEND
;
722 WARNING("bogus %s RESUME (%s)\n",
724 usb_otg_state_string(musb
->xceiv
->otg
->state
));
729 /* return IRQ_HANDLED to tell the caller to return immediately */
730 static irqreturn_t
musb_handle_intr_sessreq(struct musb
*musb
, u8 devctl
)
732 void __iomem
*mbase
= musb
->mregs
;
734 if ((devctl
& MUSB_DEVCTL_VBUS
) == MUSB_DEVCTL_VBUS
735 && (devctl
& MUSB_DEVCTL_BDEVICE
)) {
736 musb_dbg(musb
, "SessReq while on B state");
740 musb_dbg(musb
, "SESSION_REQUEST (%s)",
741 usb_otg_state_string(musb
->xceiv
->otg
->state
));
743 /* IRQ arrives from ID pin sense or (later, if VBUS power
744 * is removed) SRP. responses are time critical:
745 * - turn on VBUS (with silicon-specific mechanism)
746 * - go through A_WAIT_VRISE
747 * - ... to A_WAIT_BCON.
748 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
750 musb_writeb(mbase
, MUSB_DEVCTL
, MUSB_DEVCTL_SESSION
);
751 musb
->ep0_stage
= MUSB_EP0_START
;
752 musb
->xceiv
->otg
->state
= OTG_STATE_A_IDLE
;
754 musb_platform_set_vbus(musb
, 1);
759 static void musb_handle_intr_vbuserr(struct musb
*musb
, u8 devctl
)
763 /* During connection as an A-Device, we may see a short
764 * current spikes causing voltage drop, because of cable
765 * and peripheral capacitance combined with vbus draw.
766 * (So: less common with truly self-powered devices, where
767 * vbus doesn't act like a power supply.)
769 * Such spikes are short; usually less than ~500 usec, max
770 * of ~2 msec. That is, they're not sustained overcurrent
771 * errors, though they're reported using VBUSERROR irqs.
773 * Workarounds: (a) hardware: use self powered devices.
774 * (b) software: ignore non-repeated VBUS errors.
776 * REVISIT: do delays from lots of DEBUG_KERNEL checks
777 * make trouble here, keeping VBUS < 4.4V ?
779 switch (musb
->xceiv
->otg
->state
) {
780 case OTG_STATE_A_HOST
:
781 /* recovery is dicey once we've gotten past the
782 * initial stages of enumeration, but if VBUS
783 * stayed ok at the other end of the link, and
784 * another reset is due (at least for high speed,
785 * to redo the chirp etc), it might work OK...
787 case OTG_STATE_A_WAIT_BCON
:
788 case OTG_STATE_A_WAIT_VRISE
:
789 if (musb
->vbuserr_retry
) {
790 void __iomem
*mbase
= musb
->mregs
;
792 musb
->vbuserr_retry
--;
794 devctl
|= MUSB_DEVCTL_SESSION
;
795 musb_writeb(mbase
, MUSB_DEVCTL
, devctl
);
797 musb
->port1_status
|=
798 USB_PORT_STAT_OVERCURRENT
799 | (USB_PORT_STAT_C_OVERCURRENT
<< 16);
806 dev_printk(ignore
? KERN_DEBUG
: KERN_ERR
, musb
->controller
,
807 "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
808 usb_otg_state_string(musb
->xceiv
->otg
->state
),
811 switch (devctl
& MUSB_DEVCTL_VBUS
) {
812 case 0 << MUSB_DEVCTL_VBUS_SHIFT
:
813 s
= "<SessEnd"; break;
814 case 1 << MUSB_DEVCTL_VBUS_SHIFT
:
815 s
= "<AValid"; break;
816 case 2 << MUSB_DEVCTL_VBUS_SHIFT
:
817 s
= "<VBusValid"; break;
818 /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
822 VBUSERR_RETRY_COUNT
- musb
->vbuserr_retry
,
825 /* go through A_WAIT_VFALL then start a new session */
827 musb_platform_set_vbus(musb
, 0);
830 static void musb_handle_intr_suspend(struct musb
*musb
, u8 devctl
)
832 musb_dbg(musb
, "SUSPEND (%s) devctl %02x",
833 usb_otg_state_string(musb
->xceiv
->otg
->state
), devctl
);
835 switch (musb
->xceiv
->otg
->state
) {
836 case OTG_STATE_A_PERIPHERAL
:
837 /* We also come here if the cable is removed, since
838 * this silicon doesn't report ID-no-longer-grounded.
840 * We depend on T(a_wait_bcon) to shut us down, and
841 * hope users don't do anything dicey during this
842 * undesired detour through A_WAIT_BCON.
845 musb_host_resume_root_hub(musb
);
846 musb_root_disconnect(musb
);
847 musb_platform_try_idle(musb
, jiffies
848 + msecs_to_jiffies(musb
->a_wait_bcon
849 ? : OTG_TIME_A_WAIT_BCON
));
852 case OTG_STATE_B_IDLE
:
853 if (!musb
->is_active
)
856 case OTG_STATE_B_PERIPHERAL
:
857 musb_g_suspend(musb
);
858 musb
->is_active
= musb
->g
.b_hnp_enable
;
859 if (musb
->is_active
) {
860 musb
->xceiv
->otg
->state
= OTG_STATE_B_WAIT_ACON
;
861 musb_dbg(musb
, "HNP: Setting timer for b_ase0_brst");
862 mod_timer(&musb
->otg_timer
, jiffies
864 OTG_TIME_B_ASE0_BRST
));
867 case OTG_STATE_A_WAIT_BCON
:
868 if (musb
->a_wait_bcon
!= 0)
869 musb_platform_try_idle(musb
, jiffies
870 + msecs_to_jiffies(musb
->a_wait_bcon
));
872 case OTG_STATE_A_HOST
:
873 musb
->xceiv
->otg
->state
= OTG_STATE_A_SUSPEND
;
874 musb
->is_active
= musb
->hcd
->self
.b_hnp_enable
;
876 case OTG_STATE_B_HOST
:
877 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
878 musb_dbg(musb
, "REVISIT: SUSPEND as B_HOST");
881 /* "should not happen" */
887 static void musb_handle_intr_connect(struct musb
*musb
, u8 devctl
, u8 int_usb
)
889 struct usb_hcd
*hcd
= musb
->hcd
;
892 musb
->ep0_stage
= MUSB_EP0_START
;
894 musb
->intrtxe
= musb
->epmask
;
895 musb_writew(musb
->mregs
, MUSB_INTRTXE
, musb
->intrtxe
);
896 musb
->intrrxe
= musb
->epmask
& 0xfffe;
897 musb_writew(musb
->mregs
, MUSB_INTRRXE
, musb
->intrrxe
);
898 musb_writeb(musb
->mregs
, MUSB_INTRUSBE
, 0xf7);
899 musb
->port1_status
&= ~(USB_PORT_STAT_LOW_SPEED
900 |USB_PORT_STAT_HIGH_SPEED
901 |USB_PORT_STAT_ENABLE
903 musb
->port1_status
|= USB_PORT_STAT_CONNECTION
904 |(USB_PORT_STAT_C_CONNECTION
<< 16);
906 /* high vs full speed is just a guess until after reset */
907 if (devctl
& MUSB_DEVCTL_LSDEV
)
908 musb
->port1_status
|= USB_PORT_STAT_LOW_SPEED
;
910 /* indicate new connection to OTG machine */
911 switch (musb
->xceiv
->otg
->state
) {
912 case OTG_STATE_B_PERIPHERAL
:
913 if (int_usb
& MUSB_INTR_SUSPEND
) {
914 musb_dbg(musb
, "HNP: SUSPEND+CONNECT, now b_host");
915 int_usb
&= ~MUSB_INTR_SUSPEND
;
918 musb_dbg(musb
, "CONNECT as b_peripheral???");
920 case OTG_STATE_B_WAIT_ACON
:
921 musb_dbg(musb
, "HNP: CONNECT, now b_host");
923 musb
->xceiv
->otg
->state
= OTG_STATE_B_HOST
;
925 musb
->hcd
->self
.is_b_host
= 1;
926 del_timer(&musb
->otg_timer
);
929 if ((devctl
& MUSB_DEVCTL_VBUS
)
930 == (3 << MUSB_DEVCTL_VBUS_SHIFT
)) {
931 musb
->xceiv
->otg
->state
= OTG_STATE_A_HOST
;
933 hcd
->self
.is_b_host
= 0;
938 musb_host_poke_root_hub(musb
);
940 musb_dbg(musb
, "CONNECT (%s) devctl %02x",
941 usb_otg_state_string(musb
->xceiv
->otg
->state
), devctl
);
944 static void musb_handle_intr_disconnect(struct musb
*musb
, u8 devctl
)
946 musb_dbg(musb
, "DISCONNECT (%s) as %s, devctl %02x",
947 usb_otg_state_string(musb
->xceiv
->otg
->state
),
948 MUSB_MODE(musb
), devctl
);
950 switch (musb
->xceiv
->otg
->state
) {
951 case OTG_STATE_A_HOST
:
952 case OTG_STATE_A_SUSPEND
:
953 musb_host_resume_root_hub(musb
);
954 musb_root_disconnect(musb
);
955 if (musb
->a_wait_bcon
!= 0)
956 musb_platform_try_idle(musb
, jiffies
957 + msecs_to_jiffies(musb
->a_wait_bcon
));
959 case OTG_STATE_B_HOST
:
960 /* REVISIT this behaves for "real disconnect"
961 * cases; make sure the other transitions from
962 * from B_HOST act right too. The B_HOST code
963 * in hnp_stop() is currently not used...
965 musb_root_disconnect(musb
);
967 musb
->hcd
->self
.is_b_host
= 0;
968 musb
->xceiv
->otg
->state
= OTG_STATE_B_PERIPHERAL
;
970 musb_g_disconnect(musb
);
972 case OTG_STATE_A_PERIPHERAL
:
974 musb_root_disconnect(musb
);
976 case OTG_STATE_B_WAIT_ACON
:
977 case OTG_STATE_B_PERIPHERAL
:
978 case OTG_STATE_B_IDLE
:
979 musb_g_disconnect(musb
);
982 WARNING("unhandled DISCONNECT transition (%s)\n",
983 usb_otg_state_string(musb
->xceiv
->otg
->state
));
989 * mentor saves a bit: bus reset and babble share the same irq.
990 * only host sees babble; only peripheral sees bus reset.
992 static void musb_handle_intr_reset(struct musb
*musb
)
994 if (is_host_active(musb
)) {
996 * When BABBLE happens what we can depends on which
997 * platform MUSB is running, because some platforms
998 * implemented proprietary means for 'recovering' from
999 * Babble conditions. One such platform is AM335x. In
1000 * most cases, however, the only thing we can do is
1003 dev_err(musb
->controller
, "Babble\n");
1004 musb_recover_from_babble(musb
);
1006 musb_dbg(musb
, "BUS RESET as %s",
1007 usb_otg_state_string(musb
->xceiv
->otg
->state
));
1008 switch (musb
->xceiv
->otg
->state
) {
1009 case OTG_STATE_A_SUSPEND
:
1012 case OTG_STATE_A_WAIT_BCON
: /* OPT TD.4.7-900ms */
1013 /* never use invalid T(a_wait_bcon) */
1014 musb_dbg(musb
, "HNP: in %s, %d msec timeout",
1015 usb_otg_state_string(musb
->xceiv
->otg
->state
),
1016 TA_WAIT_BCON(musb
));
1017 mod_timer(&musb
->otg_timer
, jiffies
1018 + msecs_to_jiffies(TA_WAIT_BCON(musb
)));
1020 case OTG_STATE_A_PERIPHERAL
:
1021 del_timer(&musb
->otg_timer
);
1024 case OTG_STATE_B_WAIT_ACON
:
1025 musb_dbg(musb
, "HNP: RESET (%s), to b_peripheral",
1026 usb_otg_state_string(musb
->xceiv
->otg
->state
));
1027 musb
->xceiv
->otg
->state
= OTG_STATE_B_PERIPHERAL
;
1030 case OTG_STATE_B_IDLE
:
1031 musb
->xceiv
->otg
->state
= OTG_STATE_B_PERIPHERAL
;
1033 case OTG_STATE_B_PERIPHERAL
:
1037 musb_dbg(musb
, "Unhandled BUS RESET as %s",
1038 usb_otg_state_string(musb
->xceiv
->otg
->state
));
1044 * Interrupt Service Routine to record USB "global" interrupts.
1045 * Since these do not happen often and signify things of
1046 * paramount importance, it seems OK to check them individually;
1047 * the order of the tests is specified in the manual
1049 * @param musb instance pointer
1050 * @param int_usb register contents
1054 static irqreturn_t
musb_stage0_irq(struct musb
*musb
, u8 int_usb
,
1057 irqreturn_t handled
= IRQ_NONE
;
1059 musb_dbg(musb
, "<== DevCtl=%02x, int_usb=0x%x", devctl
, int_usb
);
1061 /* in host mode, the peripheral may issue remote wakeup.
1062 * in peripheral mode, the host may resume the link.
1063 * spurious RESUME irqs happen too, paired with SUSPEND.
1065 if (int_usb
& MUSB_INTR_RESUME
) {
1066 musb_handle_intr_resume(musb
, devctl
);
1067 handled
= IRQ_HANDLED
;
1070 /* see manual for the order of the tests */
1071 if (int_usb
& MUSB_INTR_SESSREQ
) {
1072 if (musb_handle_intr_sessreq(musb
, devctl
))
1074 handled
= IRQ_HANDLED
;
1077 if (int_usb
& MUSB_INTR_VBUSERROR
) {
1078 musb_handle_intr_vbuserr(musb
, devctl
);
1079 handled
= IRQ_HANDLED
;
1082 if (int_usb
& MUSB_INTR_SUSPEND
) {
1083 musb_handle_intr_suspend(musb
, devctl
);
1084 handled
= IRQ_HANDLED
;
1087 if (int_usb
& MUSB_INTR_CONNECT
) {
1088 musb_handle_intr_connect(musb
, devctl
, int_usb
);
1089 handled
= IRQ_HANDLED
;
1092 if (int_usb
& MUSB_INTR_DISCONNECT
) {
1093 musb_handle_intr_disconnect(musb
, devctl
);
1094 handled
= IRQ_HANDLED
;
1097 if (int_usb
& MUSB_INTR_RESET
) {
1098 musb_handle_intr_reset(musb
);
1099 handled
= IRQ_HANDLED
;
1103 /* REVISIT ... this would be for multiplexing periodic endpoints, or
1104 * supporting transfer phasing to prevent exceeding ISO bandwidth
1105 * limits of a given frame or microframe.
1107 * It's not needed for peripheral side, which dedicates endpoints;
1108 * though it _might_ use SOF irqs for other purposes.
1110 * And it's not currently needed for host side, which also dedicates
1111 * endpoints, relies on TX/RX interval registers, and isn't claimed
1112 * to support ISO transfers yet.
1114 if (int_usb
& MUSB_INTR_SOF
) {
1115 void __iomem
*mbase
= musb
->mregs
;
1116 struct musb_hw_ep
*ep
;
1120 dev_dbg(musb
->controller
, "START_OF_FRAME\n");
1121 handled
= IRQ_HANDLED
;
1123 /* start any periodic Tx transfers waiting for current frame */
1124 frame
= musb_readw(mbase
, MUSB_FRAME
);
1125 ep
= musb
->endpoints
;
1126 for (epnum
= 1; (epnum
< musb
->nr_endpoints
)
1127 && (musb
->epmask
>= (1 << epnum
));
1130 * FIXME handle framecounter wraps (12 bits)
1131 * eliminate duplicated StartUrb logic
1133 if (ep
->dwWaitFrame
>= frame
) {
1134 ep
->dwWaitFrame
= 0;
1135 pr_debug("SOF --> periodic TX%s on %d\n",
1136 ep
->tx_channel
? " DMA" : "",
1138 if (!ep
->tx_channel
)
1139 musb_h_tx_start(musb
, epnum
);
1141 cppi_hostdma_start(musb
, epnum
);
1143 } /* end of for loop */
1147 schedule_delayed_work(&musb
->irq_work
, 0);
1152 /*-------------------------------------------------------------------------*/
1154 static void musb_disable_interrupts(struct musb
*musb
)
1156 void __iomem
*mbase
= musb
->mregs
;
1158 /* disable interrupts */
1159 musb_writeb(mbase
, MUSB_INTRUSBE
, 0);
1161 musb_writew(mbase
, MUSB_INTRTXE
, 0);
1163 musb_writew(mbase
, MUSB_INTRRXE
, 0);
1165 /* flush pending interrupts */
1166 musb_clearb(mbase
, MUSB_INTRUSB
);
1167 musb_clearw(mbase
, MUSB_INTRTX
);
1168 musb_clearw(mbase
, MUSB_INTRRX
);
1171 static void musb_enable_interrupts(struct musb
*musb
)
1173 void __iomem
*regs
= musb
->mregs
;
1175 /* Set INT enable registers, enable interrupts */
1176 musb
->intrtxe
= musb
->epmask
;
1177 musb_writew(regs
, MUSB_INTRTXE
, musb
->intrtxe
);
1178 musb
->intrrxe
= musb
->epmask
& 0xfffe;
1179 musb_writew(regs
, MUSB_INTRRXE
, musb
->intrrxe
);
1180 musb_writeb(regs
, MUSB_INTRUSBE
, 0xf7);
1185 * Program the HDRC to start (enable interrupts, dma, etc.).
1187 void musb_start(struct musb
*musb
)
1189 void __iomem
*regs
= musb
->mregs
;
1190 u8 devctl
= musb_readb(regs
, MUSB_DEVCTL
);
1193 musb_dbg(musb
, "<== devctl %02x", devctl
);
1195 musb_enable_interrupts(musb
);
1196 musb_writeb(regs
, MUSB_TESTMODE
, 0);
1198 power
= MUSB_POWER_ISOUPDATE
;
1200 * treating UNKNOWN as unspecified maximum speed, in which case
1201 * we will default to high-speed.
1203 if (musb
->config
->maximum_speed
== USB_SPEED_HIGH
||
1204 musb
->config
->maximum_speed
== USB_SPEED_UNKNOWN
)
1205 power
|= MUSB_POWER_HSENAB
;
1206 musb_writeb(regs
, MUSB_POWER
, power
);
1208 musb
->is_active
= 0;
1209 devctl
= musb_readb(regs
, MUSB_DEVCTL
);
1210 devctl
&= ~MUSB_DEVCTL_SESSION
;
1212 /* session started after:
1213 * (a) ID-grounded irq, host mode;
1214 * (b) vbus present/connect IRQ, peripheral mode;
1215 * (c) peripheral initiates, using SRP
1217 if (musb
->port_mode
!= MUSB_HOST
&&
1218 musb
->xceiv
->otg
->state
!= OTG_STATE_A_WAIT_BCON
&&
1219 (devctl
& MUSB_DEVCTL_VBUS
) == MUSB_DEVCTL_VBUS
) {
1220 musb
->is_active
= 1;
1222 devctl
|= MUSB_DEVCTL_SESSION
;
1225 musb_platform_enable(musb
);
1226 musb_writeb(regs
, MUSB_DEVCTL
, devctl
);
/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{
	/* stop IRQs, timers, ... */
	musb_platform_disable(musb);
	musb_disable_interrupts(musb);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	/* FIXME
	 *  - mark host and/or peripheral drivers unusable/inactive
	 *  - disable DMA (and enable it in HdrcStart)
	 *  - make sure we can musb_start() after musb_stop(); with
	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
	 *    ...
	 */
	musb_platform_try_idle(musb, 0);
}
/*-------------------------------------------------------------------------*/

/*
 * The silicon either has hard-wired endpoint configurations, or else
 * "dynamic fifo" sizing.  The driver has support for both, though at this
 * writing only the dynamic sizing is very well tested.   Since we switched
 * away from compile-time hardware parameters, we can no longer rely on
 * dead code elimination to leave only the relevant one in the object file.
 *
 * We don't currently use dynamic fifo setup capability to do anything
 * more than selecting one of a bunch of predefined configurations.
 */
/* Index into the mode_N_cfg tables below; only meaningful with dyn FIFOs. */
static ushort fifo_mode;

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
/*
 * tables defining fifo_mode values.  define more if you like.
 * for host side, make sure both halves of ep1 are set up.
 */

/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
};
/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
/*
 * configure a fifo; for non-shared endpoints, this may be called
 * once for a tx fifo and once for an rx fifo.
 *
 * returns negative errno or offset for next fifo.
 */
static int
fifo_setup(struct musb *musb, struct musb_hw_ep  *hw_ep,
		const struct musb_fifo_cfg *cfg, u16 offset)
{
	void __iomem	*mbase = musb->mregs;
	int	size = 0;
	u16	maxpacket = cfg->maxpacket;
	u16	c_off = offset >> 3;		/* FIFOADD is in units of 8 bytes */
	u8	c_size;

	/* expect hw_ep has already been zero-initialized */

	/* round maxpacket up to a hardware-supported power of two, min 8 */
	size = ffs(max(maxpacket, (u16) 8)) - 1;
	maxpacket = 1 << size;

	c_size = size - 3;
	if (cfg->mode == BUF_DOUBLE) {
		/* double buffering needs twice the FIFO RAM */
		if ((offset + (maxpacket << 1)) >
				(1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
		c_size |= MUSB_FIFOSZ_DPB;
	} else {
		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
	}

	/* configure the FIFO */
	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);

	/* EP0 reserved endpoint for control, bidirectional;
	 * EP1 reserved for bulk, two unidirectional halves.
	 */
	if (hw_ep->epnum == 1)
		musb->bulk_ep = hw_ep;
	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
	switch (cfg->style) {
	case FIFO_TX:
		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_tx = maxpacket;
		break;
	case FIFO_RX:
		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;
		break;
	case FIFO_RXTX:
		/* shared fifo: program both directions at the same offset */
		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;

		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
		hw_ep->max_packet_sz_tx = maxpacket;

		hw_ep->is_shared_fifo = true;
		break;
	}

	/* NOTE rx and tx endpoint irqs aren't managed separately,
	 * which happens to be ok
	 */
	musb->epmask |= (1 << hw_ep->epnum);

	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
/* ep0 is always configured first, as a shared 64-byte control FIFO */
static struct musb_fifo_cfg ep0_cfg = {
	.style = FIFO_RXTX, .maxpacket = 64,
};
/*
 * ep_config_from_table - program dynamic FIFOs from a config table
 * @musb: controller instance
 *
 * Uses the board-supplied table (musb->config->fifo_cfg) when present,
 * else one of the mode_N_cfg tables selected by the fifo_mode parameter.
 * Returns 0 on success or a negative errno (bad ep number, FIFO RAM
 * overrun, or no bulk-capable endpoint found).
 */
static int ep_config_from_table(struct musb *musb)
{
	const struct musb_fifo_cfg	*cfg;
	unsigned		i, n;
	int			offset;
	struct musb_hw_ep	*hw_ep = musb->endpoints;

	if (musb->config->fifo_cfg) {
		cfg = musb->config->fifo_cfg;
		n = musb->config->fifo_cfg_size;
		goto done;
	}

	switch (fifo_mode) {
	default:
		fifo_mode = 0;
		fallthrough;
	case 0:
		cfg = mode_0_cfg;
		n = ARRAY_SIZE(mode_0_cfg);
		break;
	case 1:
		cfg = mode_1_cfg;
		n = ARRAY_SIZE(mode_1_cfg);
		break;
	case 2:
		cfg = mode_2_cfg;
		n = ARRAY_SIZE(mode_2_cfg);
		break;
	case 3:
		cfg = mode_3_cfg;
		n = ARRAY_SIZE(mode_3_cfg);
		break;
	case 4:
		cfg = mode_4_cfg;
		n = ARRAY_SIZE(mode_4_cfg);
		break;
	case 5:
		cfg = mode_5_cfg;
		n = ARRAY_SIZE(mode_5_cfg);
		break;
	}

	pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);

done:
	offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
	/* assert(offset > 0) */

	/* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
	 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
	 */

	for (i = 0; i < n; i++) {
		u8	epn = cfg->hw_ep_num;

		if (epn >= musb->config->num_eps) {
			pr_debug("%s: invalid ep %d\n",
					musb_driver_name, epn);
			return -EINVAL;
		}
		offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
		if (offset < 0) {
			pr_debug("%s: mem overrun, ep %d\n",
					musb_driver_name, epn);
			return offset;
		}
		epn++;
		musb->nr_endpoints = max(epn, musb->nr_endpoints);
	}

	pr_debug("%s: %d/%d max ep, %d/%d memory\n",
			musb_driver_name,
			n + 1, musb->config->num_eps * 2 - 1,
			offset, (1 << (musb->config->ram_bits + 2)));

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
/*
 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
 * @param musb the controller
 *
 * Reads the hard-wired FIFO sizes out of the silicon and picks the
 * first endpoint with >= 512-byte FIFOs in both directions for bulk.
 */
static int ep_config_from_hw(struct musb *musb)
{
	u8 epnum = 0;
	struct musb_hw_ep *hw_ep;
	void __iomem *mbase = musb->mregs;
	int ret = 0;

	musb_dbg(musb, "<== static silicon ep config");

	/* FIXME pick up ep0 maxpacket size */

	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
		musb_ep_select(mbase, epnum);
		hw_ep = musb->endpoints + epnum;

		ret = musb_read_fifosize(musb, hw_ep, epnum);
		if (ret < 0)
			break;

		/* FIXME set up hw_ep->{rx,tx}_double_buffered */

		/* pick an RX/TX endpoint for bulk */
		if (hw_ep->max_packet_sz_tx < 512
				|| hw_ep->max_packet_sz_rx < 512)
			continue;

		/* REVISIT:  this algorithm is lazy, we should at least
		 * try to pick a double buffered endpoint.
		 */
		if (musb->bulk_ep)
			continue;
		musb->bulk_ep = hw_ep;
	}

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
/* controller flavor passed to musb_core_init(): multipoint (MHDRC) or not */
enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
 * configure endpoints, or take their config from silicon
 */
static int musb_core_init(u16 musb_type, struct musb *musb)
{
	u8 reg;
	char *type;
	char aInfo[90];
	void __iomem	*mbase = musb->mregs;
	int		status = 0;
	int		i;

	/* log core options (read using indexed model) */
	reg = musb_read_configdata(mbase);

	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
		strcat(aInfo, ", dyn FIFOs");
		musb->dyn_fifo = true;
	}
	if (reg & MUSB_CONFIGDATA_MPRXE) {
		strcat(aInfo, ", bulk combine");
		musb->bulk_combine = true;
	}
	if (reg & MUSB_CONFIGDATA_MPTXE) {
		strcat(aInfo, ", bulk split");
		musb->bulk_split = true;
	}
	if (reg & MUSB_CONFIGDATA_HBRXE) {
		strcat(aInfo, ", HB-ISO Rx");
		musb->hb_iso_rx = true;
	}
	if (reg & MUSB_CONFIGDATA_HBTXE) {
		strcat(aInfo, ", HB-ISO Tx");
		musb->hb_iso_tx = true;
	}
	if (reg & MUSB_CONFIGDATA_SOFTCONE)
		strcat(aInfo, ", SoftConn");

	pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);

	if (MUSB_CONTROLLER_MHDRC == musb_type) {
		musb->is_multipoint = 1;
		type = "M";
	} else {
		musb->is_multipoint = 0;
		type = "";
		/* non-multipoint cores can't address external hubs */
		if (IS_ENABLED(CONFIG_USB) &&
		    !IS_ENABLED(CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB)) {
			pr_err("%s: kernel must disable external hubs, please fix the configuration\n",
			       musb_driver_name);
		}
	}

	/* log release info */
	musb->hwvers = musb_readw(mbase, MUSB_HWVERS);
	pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
		 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
		 MUSB_HWVERS_MINOR(musb->hwvers),
		 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");

	/* configure ep0 */
	musb_configure_ep0(musb);

	/* discover endpoint configuration */
	musb->nr_endpoints = 1;
	musb->epmask = 1;

	if (musb->dyn_fifo)
		status = ep_config_from_table(musb);
	else
		status = ep_config_from_hw(musb);

	if (status < 0)
		return status;

	/* finish init, and print endpoint config */
	for (i = 0; i < musb->nr_endpoints; i++) {
		struct musb_hw_ep	*hw_ep = musb->endpoints + i;

		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
		if (musb->ops->quirks & MUSB_IN_TUSB) {
			hw_ep->fifo_async = musb->async + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync = musb->sync + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync_va =
				musb->sync_va + 0x400 + musb->io.fifo_offset(i);

			if (i == 0)
				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
			else
				hw_ep->conf = mbase + 0x400 +
					(((i - 1) & 0xf) << 2);
		}
#endif

		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
		hw_ep->rx_reinit = 1;
		hw_ep->tx_reinit = 1;

		if (hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				hw_ep->is_shared_fifo ? "shared" : "tx",
				hw_ep->tx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_tx);
		}
		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				"rx",
				hw_ep->rx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_rx);
		}
		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
			musb_dbg(musb, "hw_ep %d not configured", i);
	}

	return 0;
}
/*-------------------------------------------------------------------------*/

/*
 * handle all the irqs defined by the HDRC core. for now we expect:  other
 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
 * will be assigned, and the irq will already have been acked.
 *
 * called in irq context with spinlock held, irqs blocked
 */
irqreturn_t musb_interrupt(struct musb *musb)
{
	irqreturn_t	retval = IRQ_NONE;
	unsigned long	status;
	unsigned long	epnum;
	u8		devctl;

	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
		return IRQ_NONE;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	trace_musb_isr(musb);

	/**
	 * According to Mentor Graphics' documentation, flowchart on page 98,
	 * IRQ should be handled as follows:
	 *
	 * . Resume IRQ
	 * . Session Request IRQ
	 * . VBUS Error IRQ
	 * . Suspend IRQ
	 * . Connect IRQ
	 * . Disconnect IRQ
	 * . Reset/Babble IRQ
	 * . SOF IRQ (we're not using this one)
	 * . Endpoint 0 IRQ
	 * . TX Endpoints
	 * . RX Endpoints
	 *
	 * We will be following that flowchart in order to avoid any problems
	 * that might arise with internal Finite State Machine.
	 */

	if (musb->int_usb)
		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);

	if (musb->int_tx & 1) {
		if (is_host_active(musb))
			retval |= musb_h_ep0_irq(musb);
		else
			retval |= musb_g_ep0_irq(musb);

		/* we have just handled endpoint 0 IRQ, clear it */
		musb->int_tx &= ~BIT(0);
	}

	status = musb->int_tx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_tx(musb, epnum);
		else
			musb_g_tx(musb, epnum);
	}

	status = musb->int_rx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_rx(musb, epnum);
		else
			musb_g_rx(musb, epnum);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(musb_interrupt);
#ifndef CONFIG_MUSB_PIO_ONLY
/* DMA on by default; runtime-writable so it can be toggled via sysfs */
static bool use_dma = true;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
/*
 * musb_dma_completion - dispatch a DMA completion to host/gadget code
 * @musb: controller instance
 * @epnum: hardware endpoint number (0..15)
 * @transmit: nonzero for TX completion, zero for RX
 */
void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	/* called with controller lock already held */

	if (!epnum) {
		if (!is_cppi_enabled(musb)) {
			/* endpoint 0 */
			if (is_host_active(musb))
				musb_h_ep0_irq(musb);
			else
				musb_g_ep0_irq(musb);
		}
	} else {
		/* endpoints 1..15 */
		if (transmit) {
			if (is_host_active(musb))
				musb_host_tx(musb, epnum);
			else
				musb_g_tx(musb, epnum);
		} else {
			/* receive */
			if (is_host_active(musb))
				musb_host_rx(musb, epnum);
			else
				musb_g_rx(musb, epnum);
		}
	}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);
/* glue-layer hook invoked on PHY VBUS/ID status changes; may be NULL */
static int (*musb_phy_callback)(enum musb_vbus_id_status status);

/*
 * musb_mailbox - optional phy notifier function
 * @status phy state change
 *
 * Optionally gets called from the USB PHY. Note that the USB PHY must be
 * disabled at the point the phy_callback is registered or unregistered.
 */
int musb_mailbox(enum musb_vbus_id_status status)
{
	if (musb_phy_callback)
		return musb_phy_callback(status);

	return -ENODEV;
};
EXPORT_SYMBOL_GPL(musb_mailbox);
1855 /*-------------------------------------------------------------------------*/
1858 mode_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1860 struct musb
*musb
= dev_to_musb(dev
);
1861 unsigned long flags
;
1864 spin_lock_irqsave(&musb
->lock
, flags
);
1865 ret
= sprintf(buf
, "%s\n", usb_otg_state_string(musb
->xceiv
->otg
->state
));
1866 spin_unlock_irqrestore(&musb
->lock
, flags
);
/* sysfs "mode" write: switch the controller role via the platform glue */
static ssize_t
mode_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	int		status;

	spin_lock_irqsave(&musb->lock, flags);
	if (sysfs_streq(buf, "host"))
		status = musb_platform_set_mode(musb, MUSB_HOST);
	else if (sysfs_streq(buf, "peripheral"))
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
	else if (sysfs_streq(buf, "otg"))
		status = musb_platform_set_mode(musb, MUSB_OTG);
	else
		status = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* consume the whole write on success, else propagate the error */
	return (status == 0) ? n : status;
}
static DEVICE_ATTR_RW(mode);
/* sysfs "vbus" write: set the T(a_wait_bcon) VBUS timeout in msec */
static ssize_t
vbus_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;

	if (sscanf(buf, "%lu", &val) < 1) {
		dev_err(dev, "Invalid VBUS timeout ms value\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&musb->lock, flags);
	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
		musb->is_active = 0;
	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
	spin_unlock_irqrestore(&musb->lock, flags);

	return n;
}
/* sysfs "vbus" read: report VBUS validity and the current timeout */
static ssize_t
vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned long	flags;
	unsigned long	val;
	int		vbus;
	u8		devctl;

	pm_runtime_get_sync(dev);
	spin_lock_irqsave(&musb->lock, flags);
	val = musb->a_wait_bcon;
	vbus = musb_platform_get_vbus_status(musb);
	if (vbus < 0) {
		/* Use default MUSB method by means of DEVCTL register */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS)
				== (3 << MUSB_DEVCTL_VBUS_SHIFT))
			vbus = 1;
		else
			vbus = 0;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	pm_runtime_put_sync(dev);

	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
			vbus ? "on" : "off", val);
}
static DEVICE_ATTR_RW(vbus);
/* Gadget drivers can't know that a host is connected so they might want
 * to start SRP, but users can.  This allows userspace to trigger SRP.
 */
static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb	*musb = dev_to_musb(dev);
	unsigned short	srp;

	/* only "1" is an accepted value */
	if (sscanf(buf, "%hu", &srp) != 1
			|| (srp != 1)) {
		dev_err(dev, "SRP: Value must be 1\n");
		return -EINVAL;
	}

	if (srp == 1)
		musb_g_wakeup(musb);

	return n;
}
static DEVICE_ATTR_WO(srp);
/* sysfs attributes exposed on the controller device */
static struct attribute *musb_attrs[] = {
	&dev_attr_mode.attr,
	&dev_attr_vbus.attr,
	&dev_attr_srp.attr,
	NULL
};
ATTRIBUTE_GROUPS(musb);
/* DEVCTL signatures of known quirky states, matched with speed bits masked */
#define MUSB_QUIRK_B_INVALID_VBUS_91	(MUSB_DEVCTL_BDEVICE | \
					 (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_B_DISCONNECT_99	(MUSB_DEVCTL_BDEVICE | \
					 (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19	((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)
/*
 * Check the musb devctl session bit to determine if we want to
 * allow PM runtime for the device. In general, we want to keep things
 * active when the session bit is set except after host disconnect.
 *
 * Only called from musb_irq_work. If this ever needs to get called
 * elsewhere, proper locking must be implemented for musb->session.
 */
static void musb_pm_runtime_check_session(struct musb *musb)
{
	u8 devctl, s;
	int error;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	/* Handle session status quirks first */
	s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
		MUSB_DEVCTL_HR;
	switch (devctl & ~s) {
	case MUSB_QUIRK_B_DISCONNECT_99:
		musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
		schedule_delayed_work(&musb->irq_work,
				      msecs_to_jiffies(1000));
		break;
	case MUSB_QUIRK_B_INVALID_VBUS_91:
		if (musb->quirk_retries && !musb->flush_irq_work) {
			musb_dbg(musb,
				 "Poll devctl on invalid vbus, assume no session");
			schedule_delayed_work(&musb->irq_work,
					      msecs_to_jiffies(1000));
			musb->quirk_retries--;
			return;
		}
		fallthrough;
	case MUSB_QUIRK_A_DISCONNECT_19:
		if (musb->quirk_retries && !musb->flush_irq_work) {
			musb_dbg(musb,
				 "Poll devctl on possible host mode disconnect");
			schedule_delayed_work(&musb->irq_work,
					      msecs_to_jiffies(1000));
			musb->quirk_retries--;
			return;
		}
		if (!musb->session)
			break;
		musb_dbg(musb, "Allow PM on possible host mode disconnect");
		pm_runtime_mark_last_busy(musb->controller);
		pm_runtime_put_autosuspend(musb->controller);
		musb->session = false;
		return;
	default:
		break;
	}

	/* No need to do anything if session has not changed */
	s = devctl & MUSB_DEVCTL_SESSION;
	if (s == musb->session)
		return;

	/* Block PM or allow PM? */
	if (s) {
		musb_dbg(musb, "Block PM on active session: %02x", devctl);
		error = pm_runtime_get_sync(musb->controller);
		if (error < 0)
			dev_err(musb->controller, "Could not enable: %i\n",
				error);
		musb->quirk_retries = 3;
	} else {
		musb_dbg(musb, "Allow PM with no session: %02x", devctl);
		pm_runtime_mark_last_busy(musb->controller);
		pm_runtime_put_autosuspend(musb->controller);
	}

	musb->session = s;
}
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
	struct musb *musb = container_of(data, struct musb, irq_work.work);
	int error;

	error = pm_runtime_get_sync(musb->controller);
	if (error < 0) {
		dev_err(musb->controller, "Could not enable: %i\n", error);

		return;
	}

	musb_pm_runtime_check_session(musb);

	/* notify userspace when the OTG state string changes */
	if (musb->xceiv->otg->state != musb->xceiv_old_state) {
		musb->xceiv_old_state = musb->xceiv->otg->state;
		sysfs_notify(&musb->controller->kobj, NULL, "mode");
	}

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
}
/*
 * musb_recover_from_babble - reset the controller after a babble interrupt
 * @musb: controller instance
 *
 * Disables interrupts, lets the glue layer recover, drops the session,
 * reports the disconnect to usbcore, re-programs the endpoint FIFOs
 * (lost with the session), then restarts the session.
 */
static void musb_recover_from_babble(struct musb *musb)
{
	int ret;
	u8 devctl;

	musb_disable_interrupts(musb);

	/*
	 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
	 * it some slack and wait for 10us.
	 */
	udelay(10);

	ret  = musb_platform_recover(musb);
	if (ret) {
		musb_enable_interrupts(musb);
		return;
	}

	/* drop session bit */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	/* tell usbcore about it */
	musb_root_disconnect(musb);

	/*
	 * When a babble condition occurs, the musb controller
	 * removes the session bit and the endpoint config is lost.
	 */
	if (musb->dyn_fifo)
		ret = ep_config_from_table(musb);
	else
		ret = ep_config_from_hw(musb);

	/* restart session */
	if (ret == 0)
		musb_start(musb);
}
/* --------------------------------------------------------------------------
 * Init support
 */

/*
 * allocate_instance - allocate and pre-initialize a struct musb
 * @dev: the controller device
 * @config: board configuration (endpoint count, RAM size, ...)
 * @mbase: mapped controller registers
 *
 * Returns the devm-allocated instance, or NULL on failure.
 */
static struct musb *allocate_instance(struct device *dev,
		const struct musb_hdrc_config *config, void __iomem *mbase)
{
	struct musb		*musb;
	struct musb_hw_ep	*ep;
	int			epnum;
	int			ret;

	musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
	if (!musb)
		return NULL;

	INIT_LIST_HEAD(&musb->control);
	INIT_LIST_HEAD(&musb->in_bulk);
	INIT_LIST_HEAD(&musb->out_bulk);
	INIT_LIST_HEAD(&musb->pending_list);

	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
	musb->mregs = mbase;
	musb->ctrl_base = mbase;
	musb->nIrq = -ENODEV;		/* marks "no IRQ claimed yet" for musb_free() */
	musb->config = config;
	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
	for (epnum = 0, ep = musb->endpoints;
			epnum < musb->config->num_eps;
			epnum++, ep++) {
		ep->musb = musb;
		ep->epnum = epnum;
	}

	musb->controller = dev;

	ret = musb_host_alloc(musb);
	if (ret < 0)
		goto err_free;

	dev_set_drvdata(dev, musb);

	return musb;

err_free:
	return NULL;
}
static void musb_free(struct musb *musb)
{
	/* this has multiple entry modes. it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything's been de-activated.
	 */

	if (musb->nIrq >= 0) {
		if (musb->irq_wake)
			disable_irq_wake(musb->nIrq);
		free_irq(musb->nIrq, musb);
	}

	musb_host_free(musb);
}
/* deferred-callback entry queued on musb->pending_list until runtime resume */
struct musb_pending_work {
	int (*callback)(struct musb *musb, void *data);
	void *data;
	struct list_head node;
};
/*
 * Called from musb_runtime_resume(), musb_resume(), and
 * musb_queue_resume_work(). Callers must take musb->lock.
 */
static int musb_run_resume_work(struct musb *musb)
{
	struct musb_pending_work *w, *_w;
	unsigned long flags;
	int error = 0;

	spin_lock_irqsave(&musb->list_lock, flags);
	list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
		if (w->callback) {
			error = w->callback(musb, w->data);
			if (error < 0) {
				dev_err(musb->controller,
					"resume callback %p failed: %i\n",
					w->callback, error);
			}
		}
		/* entry is consumed whether or not the callback succeeded */
		list_del(&w->node);
		devm_kfree(musb->controller, w);
	}
	spin_unlock_irqrestore(&musb->list_lock, flags);

	return error;
}
/*
 * Called to run work if device is active or else queue the work to happen
 * on resume. Caller must take musb->lock and must hold an RPM reference.
 *
 * Note that we cowardly refuse queuing work after musb PM runtime
 * resume is done calling musb_run_resume_work() and return -EINPROGRESS
 * instead.
 */
int musb_queue_resume_work(struct musb *musb,
			   int (*callback)(struct musb *musb, void *data),
			   void *data)
{
	struct musb_pending_work *w;
	unsigned long flags;
	int error;

	if (WARN_ON(!callback))
		return -EINVAL;

	/* NOTE(review): pm_runtime_active() is checked outside list_lock;
	 * a resume racing here relies on the is_runtime_suspended re-check
	 * below — confirm against current upstream before changing.
	 */
	if (pm_runtime_active(musb->controller))
		return callback(musb, data);

	w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
	if (!w)
		return -ENOMEM;

	w->callback = callback;
	w->data = data;

	spin_lock_irqsave(&musb->list_lock, flags);
	if (musb->is_runtime_suspended) {
		list_add_tail(&w->node, &musb->pending_list);
		error = 0;
	} else {
		dev_err(musb->controller, "could not add resume work %p\n",
			callback);
		devm_kfree(musb->controller, w);
		error = -EINPROGRESS;
	}
	spin_unlock_irqrestore(&musb->list_lock, flags);

	return error;
}
EXPORT_SYMBOL_GPL(musb_queue_resume_work);
/*
 * musb_deassert_reset - delayed work to finish a root-port reset
 * @work: embedded in musb->deassert_reset_work
 */
static void musb_deassert_reset(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, deassert_reset_work.work);

	spin_lock_irqsave(&musb->lock, flags);

	/* only deassert if the port is still flagged as in reset */
	if (musb->port1_status & USB_PORT_STAT_RESET)
		musb_port_reset(musb, false);

	spin_unlock_irqrestore(&musb->lock, flags);
}
2289 * Perform generic per-controller initialization.
2291 * @dev: the controller (already clocked, etc)
2293 * @ctrl: virtual address of controller registers,
2294 * not yet corrected for platform-specific offsets
2297 musb_init_controller(struct device
*dev
, int nIrq
, void __iomem
*ctrl
)
2301 struct musb_hdrc_platform_data
*plat
= dev_get_platdata(dev
);
2303 /* The driver might handle more features than the board; OK.
2304 * Fail when the board needs a feature that's not enabled.
2307 dev_err(dev
, "no platform_data?\n");
2313 musb
= allocate_instance(dev
, plat
->config
, ctrl
);
2319 spin_lock_init(&musb
->lock
);
2320 spin_lock_init(&musb
->list_lock
);
2321 musb
->board_set_power
= plat
->set_power
;
2322 musb
->min_power
= plat
->min_power
;
2323 musb
->ops
= plat
->platform_ops
;
2324 musb
->port_mode
= plat
->mode
;
2327 * Initialize the default IO functions. At least omap2430 needs
2328 * these early. We initialize the platform specific IO functions
2331 musb_readb
= musb_default_readb
;
2332 musb_writeb
= musb_default_writeb
;
2333 musb_readw
= musb_default_readw
;
2334 musb_writew
= musb_default_writew
;
2336 /* The musb_platform_init() call:
2337 * - adjusts musb->mregs
2338 * - sets the musb->isr
2339 * - may initialize an integrated transceiver
2340 * - initializes musb->xceiv, usually by otg_get_phy()
2341 * - stops powering VBUS
2343 * There are various transceiver configurations.
2344 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
2345 * external/discrete ones in various flavors (twl4030 family,
2346 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
2348 status
= musb_platform_init(musb
);
2358 /* Most devices use indexed offset or flat offset */
2359 if (musb
->ops
->quirks
& MUSB_INDEXED_EP
) {
2360 musb
->io
.ep_offset
= musb_indexed_ep_offset
;
2361 musb
->io
.ep_select
= musb_indexed_ep_select
;
2363 musb
->io
.ep_offset
= musb_flat_ep_offset
;
2364 musb
->io
.ep_select
= musb_flat_ep_select
;
2367 if (musb
->ops
->quirks
& MUSB_G_NO_SKB_RESERVE
)
2368 musb
->g
.quirk_avoids_skb_reserve
= 1;
2370 /* At least tusb6010 has its own offsets */
2371 if (musb
->ops
->ep_offset
)
2372 musb
->io
.ep_offset
= musb
->ops
->ep_offset
;
2373 if (musb
->ops
->ep_select
)
2374 musb
->io
.ep_select
= musb
->ops
->ep_select
;
2376 if (musb
->ops
->fifo_mode
)
2377 fifo_mode
= musb
->ops
->fifo_mode
;
2381 if (musb
->ops
->fifo_offset
)
2382 musb
->io
.fifo_offset
= musb
->ops
->fifo_offset
;
2384 musb
->io
.fifo_offset
= musb_default_fifo_offset
;
2386 if (musb
->ops
->busctl_offset
)
2387 musb
->io
.busctl_offset
= musb
->ops
->busctl_offset
;
2389 musb
->io
.busctl_offset
= musb_default_busctl_offset
;
2391 if (musb
->ops
->readb
)
2392 musb_readb
= musb
->ops
->readb
;
2393 if (musb
->ops
->writeb
)
2394 musb_writeb
= musb
->ops
->writeb
;
2395 if (musb
->ops
->clearb
)
2396 musb_clearb
= musb
->ops
->clearb
;
2398 musb_clearb
= musb_readb
;
2400 if (musb
->ops
->readw
)
2401 musb_readw
= musb
->ops
->readw
;
2402 if (musb
->ops
->writew
)
2403 musb_writew
= musb
->ops
->writew
;
2404 if (musb
->ops
->clearw
)
2405 musb_clearw
= musb
->ops
->clearw
;
2407 musb_clearw
= musb_readw
;
2409 #ifndef CONFIG_MUSB_PIO_ONLY
2410 if (!musb
->ops
->dma_init
|| !musb
->ops
->dma_exit
) {
2411 dev_err(dev
, "DMA controller not set\n");
2415 musb_dma_controller_create
= musb
->ops
->dma_init
;
2416 musb_dma_controller_destroy
= musb
->ops
->dma_exit
;
2419 if (musb
->ops
->read_fifo
)
2420 musb
->io
.read_fifo
= musb
->ops
->read_fifo
;
2422 musb
->io
.read_fifo
= musb_default_read_fifo
;
2424 if (musb
->ops
->write_fifo
)
2425 musb
->io
.write_fifo
= musb
->ops
->write_fifo
;
2427 musb
->io
.write_fifo
= musb_default_write_fifo
;
2429 if (musb
->ops
->get_toggle
)
2430 musb
->io
.get_toggle
= musb
->ops
->get_toggle
;
2432 musb
->io
.get_toggle
= musb_default_get_toggle
;
2434 if (musb
->ops
->set_toggle
)
2435 musb
->io
.set_toggle
= musb
->ops
->set_toggle
;
2437 musb
->io
.set_toggle
= musb_default_set_toggle
;
2439 if (!musb
->xceiv
->io_ops
) {
2440 musb
->xceiv
->io_dev
= musb
->controller
;
2441 musb
->xceiv
->io_priv
= musb
->mregs
;
2442 musb
->xceiv
->io_ops
= &musb_ulpi_access
;
2445 if (musb
->ops
->phy_callback
)
2446 musb_phy_callback
= musb
->ops
->phy_callback
;
2449 * We need musb_read/write functions initialized for PM.
2450 * Note that at least 2430 glue needs autosuspend delay
2451 * somewhere above 300 ms for the hardware to idle properly
2452 * after disconnecting the cable in host mode. Let's use
2453 * 500 ms for some margin.
2455 pm_runtime_use_autosuspend(musb
->controller
);
2456 pm_runtime_set_autosuspend_delay(musb
->controller
, 500);
2457 pm_runtime_enable(musb
->controller
);
2458 pm_runtime_get_sync(musb
->controller
);
2460 status
= usb_phy_init(musb
->xceiv
);
2462 goto err_usb_phy_init
;
2464 if (use_dma
&& dev
->dma_mask
) {
2465 musb
->dma_controller
=
2466 musb_dma_controller_create(musb
, musb
->mregs
);
2467 if (IS_ERR(musb
->dma_controller
)) {
2468 status
= PTR_ERR(musb
->dma_controller
);
2473 /* be sure interrupts are disabled before connecting ISR */
2474 musb_platform_disable(musb
);
2475 musb_disable_interrupts(musb
);
2476 musb_writeb(musb
->mregs
, MUSB_DEVCTL
, 0);
2478 /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
2479 musb_writeb(musb
->mregs
, MUSB_POWER
, 0);
2481 /* Init IRQ workqueue before request_irq */
2482 INIT_DELAYED_WORK(&musb
->irq_work
, musb_irq_work
);
2483 INIT_DELAYED_WORK(&musb
->deassert_reset_work
, musb_deassert_reset
);
2484 INIT_DELAYED_WORK(&musb
->finish_resume_work
, musb_host_finish_resume
);
2486 /* setup musb parts of the core (especially endpoints) */
2487 status
= musb_core_init(plat
->config
->multipoint
2488 ? MUSB_CONTROLLER_MHDRC
2489 : MUSB_CONTROLLER_HDRC
, musb
);
2493 timer_setup(&musb
->otg_timer
, musb_otg_timer_func
, 0);
2495 /* attach to the IRQ */
2496 if (request_irq(nIrq
, musb
->isr
, IRQF_SHARED
, dev_name(dev
), musb
)) {
2497 dev_err(dev
, "request_irq %d failed!\n", nIrq
);
2502 /* FIXME this handles wakeup irqs wrong */
2503 if (enable_irq_wake(nIrq
) == 0) {
2505 device_init_wakeup(dev
, 1);
2510 /* program PHY to use external vBus if required */
2511 if (plat
->extvbus
) {
2512 u8 busctl
= musb_readb(musb
->mregs
, MUSB_ULPI_BUSCONTROL
);
2513 busctl
|= MUSB_ULPI_USE_EXTVBUS
;
2514 musb_writeb(musb
->mregs
, MUSB_ULPI_BUSCONTROL
, busctl
);
2517 MUSB_DEV_MODE(musb
);
2518 musb
->xceiv
->otg
->state
= OTG_STATE_B_IDLE
;
2520 switch (musb
->port_mode
) {
2522 status
= musb_host_setup(musb
, plat
->power
);
2525 status
= musb_platform_set_mode(musb
, MUSB_HOST
);
2527 case MUSB_PERIPHERAL
:
2528 status
= musb_gadget_setup(musb
);
2531 status
= musb_platform_set_mode(musb
, MUSB_PERIPHERAL
);
2534 status
= musb_host_setup(musb
, plat
->power
);
2537 status
= musb_gadget_setup(musb
);
2539 musb_host_cleanup(musb
);
2542 status
= musb_platform_set_mode(musb
, MUSB_OTG
);
2545 dev_err(dev
, "unsupported port mode %d\n", musb
->port_mode
);
2552 musb_init_debugfs(musb
);
2554 musb
->is_initialized
= 1;
2555 pm_runtime_mark_last_busy(musb
->controller
);
2556 pm_runtime_put_autosuspend(musb
->controller
);
2561 cancel_delayed_work_sync(&musb
->irq_work
);
2562 cancel_delayed_work_sync(&musb
->finish_resume_work
);
2563 cancel_delayed_work_sync(&musb
->deassert_reset_work
);
2564 if (musb
->dma_controller
)
2565 musb_dma_controller_destroy(musb
->dma_controller
);
2568 usb_phy_shutdown(musb
->xceiv
);
2571 pm_runtime_dont_use_autosuspend(musb
->controller
);
2572 pm_runtime_put_sync(musb
->controller
);
2573 pm_runtime_disable(musb
->controller
);
2577 device_init_wakeup(dev
, 0);
2578 musb_platform_exit(musb
);
2581 if (status
!= -EPROBE_DEFER
)
2582 dev_err(musb
->controller
,
2583 "%s failed with status %d\n", __func__
, status
);
/*-------------------------------------------------------------------------*/

/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
 * bridge to a platform device; this driver then suffices.
 */
2598 static int musb_probe(struct platform_device
*pdev
)
2600 struct device
*dev
= &pdev
->dev
;
2601 int irq
= platform_get_irq_byname(pdev
, "mc");
2607 base
= devm_platform_ioremap_resource(pdev
, 0);
2609 return PTR_ERR(base
);
2611 return musb_init_controller(dev
, irq
, base
);
2614 static int musb_remove(struct platform_device
*pdev
)
2616 struct device
*dev
= &pdev
->dev
;
2617 struct musb
*musb
= dev_to_musb(dev
);
2618 unsigned long flags
;
2620 /* this gets called on rmmod.
2621 * - Host mode: host may still be active
2622 * - Peripheral mode: peripheral is deactivated (or never-activated)
2623 * - OTG mode: both roles are deactivated (or never-activated)
2625 musb_exit_debugfs(musb
);
2627 cancel_delayed_work_sync(&musb
->irq_work
);
2628 cancel_delayed_work_sync(&musb
->finish_resume_work
);
2629 cancel_delayed_work_sync(&musb
->deassert_reset_work
);
2630 pm_runtime_get_sync(musb
->controller
);
2631 musb_host_cleanup(musb
);
2632 musb_gadget_cleanup(musb
);
2634 musb_platform_disable(musb
);
2635 spin_lock_irqsave(&musb
->lock
, flags
);
2636 musb_disable_interrupts(musb
);
2637 musb_writeb(musb
->mregs
, MUSB_DEVCTL
, 0);
2638 spin_unlock_irqrestore(&musb
->lock
, flags
);
2639 musb_platform_exit(musb
);
2641 pm_runtime_dont_use_autosuspend(musb
->controller
);
2642 pm_runtime_put_sync(musb
->controller
);
2643 pm_runtime_disable(musb
->controller
);
2644 musb_phy_callback
= NULL
;
2645 if (musb
->dma_controller
)
2646 musb_dma_controller_destroy(musb
->dma_controller
);
2647 usb_phy_shutdown(musb
->xceiv
);
2649 device_init_wakeup(dev
, 0);
2655 static void musb_save_context(struct musb
*musb
)
2658 void __iomem
*musb_base
= musb
->mregs
;
2661 musb
->context
.frame
= musb_readw(musb_base
, MUSB_FRAME
);
2662 musb
->context
.testmode
= musb_readb(musb_base
, MUSB_TESTMODE
);
2663 musb
->context
.busctl
= musb_readb(musb_base
, MUSB_ULPI_BUSCONTROL
);
2664 musb
->context
.power
= musb_readb(musb_base
, MUSB_POWER
);
2665 musb
->context
.intrusbe
= musb_readb(musb_base
, MUSB_INTRUSBE
);
2666 musb
->context
.index
= musb_readb(musb_base
, MUSB_INDEX
);
2667 musb
->context
.devctl
= musb_readb(musb_base
, MUSB_DEVCTL
);
2669 for (i
= 0; i
< musb
->config
->num_eps
; ++i
) {
2670 struct musb_hw_ep
*hw_ep
;
2672 hw_ep
= &musb
->endpoints
[i
];
2680 musb_writeb(musb_base
, MUSB_INDEX
, i
);
2681 musb
->context
.index_regs
[i
].txmaxp
=
2682 musb_readw(epio
, MUSB_TXMAXP
);
2683 musb
->context
.index_regs
[i
].txcsr
=
2684 musb_readw(epio
, MUSB_TXCSR
);
2685 musb
->context
.index_regs
[i
].rxmaxp
=
2686 musb_readw(epio
, MUSB_RXMAXP
);
2687 musb
->context
.index_regs
[i
].rxcsr
=
2688 musb_readw(epio
, MUSB_RXCSR
);
2690 if (musb
->dyn_fifo
) {
2691 musb
->context
.index_regs
[i
].txfifoadd
=
2692 musb_readw(musb_base
, MUSB_TXFIFOADD
);
2693 musb
->context
.index_regs
[i
].rxfifoadd
=
2694 musb_readw(musb_base
, MUSB_RXFIFOADD
);
2695 musb
->context
.index_regs
[i
].txfifosz
=
2696 musb_readb(musb_base
, MUSB_TXFIFOSZ
);
2697 musb
->context
.index_regs
[i
].rxfifosz
=
2698 musb_readb(musb_base
, MUSB_RXFIFOSZ
);
2701 musb
->context
.index_regs
[i
].txtype
=
2702 musb_readb(epio
, MUSB_TXTYPE
);
2703 musb
->context
.index_regs
[i
].txinterval
=
2704 musb_readb(epio
, MUSB_TXINTERVAL
);
2705 musb
->context
.index_regs
[i
].rxtype
=
2706 musb_readb(epio
, MUSB_RXTYPE
);
2707 musb
->context
.index_regs
[i
].rxinterval
=
2708 musb_readb(epio
, MUSB_RXINTERVAL
);
2710 musb
->context
.index_regs
[i
].txfunaddr
=
2711 musb_read_txfunaddr(musb
, i
);
2712 musb
->context
.index_regs
[i
].txhubaddr
=
2713 musb_read_txhubaddr(musb
, i
);
2714 musb
->context
.index_regs
[i
].txhubport
=
2715 musb_read_txhubport(musb
, i
);
2717 musb
->context
.index_regs
[i
].rxfunaddr
=
2718 musb_read_rxfunaddr(musb
, i
);
2719 musb
->context
.index_regs
[i
].rxhubaddr
=
2720 musb_read_rxhubaddr(musb
, i
);
2721 musb
->context
.index_regs
[i
].rxhubport
=
2722 musb_read_rxhubport(musb
, i
);
2726 static void musb_restore_context(struct musb
*musb
)
2729 void __iomem
*musb_base
= musb
->mregs
;
2733 musb_writew(musb_base
, MUSB_FRAME
, musb
->context
.frame
);
2734 musb_writeb(musb_base
, MUSB_TESTMODE
, musb
->context
.testmode
);
2735 musb_writeb(musb_base
, MUSB_ULPI_BUSCONTROL
, musb
->context
.busctl
);
2737 /* Don't affect SUSPENDM/RESUME bits in POWER reg */
2738 power
= musb_readb(musb_base
, MUSB_POWER
);
2739 power
&= MUSB_POWER_SUSPENDM
| MUSB_POWER_RESUME
;
2740 musb
->context
.power
&= ~(MUSB_POWER_SUSPENDM
| MUSB_POWER_RESUME
);
2741 power
|= musb
->context
.power
;
2742 musb_writeb(musb_base
, MUSB_POWER
, power
);
2744 musb_writew(musb_base
, MUSB_INTRTXE
, musb
->intrtxe
);
2745 musb_writew(musb_base
, MUSB_INTRRXE
, musb
->intrrxe
);
2746 musb_writeb(musb_base
, MUSB_INTRUSBE
, musb
->context
.intrusbe
);
2747 if (musb
->context
.devctl
& MUSB_DEVCTL_SESSION
)
2748 musb_writeb(musb_base
, MUSB_DEVCTL
, musb
->context
.devctl
);
2750 for (i
= 0; i
< musb
->config
->num_eps
; ++i
) {
2751 struct musb_hw_ep
*hw_ep
;
2753 hw_ep
= &musb
->endpoints
[i
];
2761 musb_writeb(musb_base
, MUSB_INDEX
, i
);
2762 musb_writew(epio
, MUSB_TXMAXP
,
2763 musb
->context
.index_regs
[i
].txmaxp
);
2764 musb_writew(epio
, MUSB_TXCSR
,
2765 musb
->context
.index_regs
[i
].txcsr
);
2766 musb_writew(epio
, MUSB_RXMAXP
,
2767 musb
->context
.index_regs
[i
].rxmaxp
);
2768 musb_writew(epio
, MUSB_RXCSR
,
2769 musb
->context
.index_regs
[i
].rxcsr
);
2771 if (musb
->dyn_fifo
) {
2772 musb_writeb(musb_base
, MUSB_TXFIFOSZ
,
2773 musb
->context
.index_regs
[i
].txfifosz
);
2774 musb_writeb(musb_base
, MUSB_RXFIFOSZ
,
2775 musb
->context
.index_regs
[i
].rxfifosz
);
2776 musb_writew(musb_base
, MUSB_TXFIFOADD
,
2777 musb
->context
.index_regs
[i
].txfifoadd
);
2778 musb_writew(musb_base
, MUSB_RXFIFOADD
,
2779 musb
->context
.index_regs
[i
].rxfifoadd
);
2782 musb_writeb(epio
, MUSB_TXTYPE
,
2783 musb
->context
.index_regs
[i
].txtype
);
2784 musb_writeb(epio
, MUSB_TXINTERVAL
,
2785 musb
->context
.index_regs
[i
].txinterval
);
2786 musb_writeb(epio
, MUSB_RXTYPE
,
2787 musb
->context
.index_regs
[i
].rxtype
);
2788 musb_writeb(epio
, MUSB_RXINTERVAL
,
2790 musb
->context
.index_regs
[i
].rxinterval
);
2791 musb_write_txfunaddr(musb
, i
,
2792 musb
->context
.index_regs
[i
].txfunaddr
);
2793 musb_write_txhubaddr(musb
, i
,
2794 musb
->context
.index_regs
[i
].txhubaddr
);
2795 musb_write_txhubport(musb
, i
,
2796 musb
->context
.index_regs
[i
].txhubport
);
2798 musb_write_rxfunaddr(musb
, i
,
2799 musb
->context
.index_regs
[i
].rxfunaddr
);
2800 musb_write_rxhubaddr(musb
, i
,
2801 musb
->context
.index_regs
[i
].rxhubaddr
);
2802 musb_write_rxhubport(musb
, i
,
2803 musb
->context
.index_regs
[i
].rxhubport
);
2805 musb_writeb(musb_base
, MUSB_INDEX
, musb
->context
.index
);
2808 static int musb_suspend(struct device
*dev
)
2810 struct musb
*musb
= dev_to_musb(dev
);
2811 unsigned long flags
;
2814 ret
= pm_runtime_get_sync(dev
);
2816 pm_runtime_put_noidle(dev
);
2820 musb_platform_disable(musb
);
2821 musb_disable_interrupts(musb
);
2823 musb
->flush_irq_work
= true;
2824 while (flush_delayed_work(&musb
->irq_work
))
2826 musb
->flush_irq_work
= false;
2828 if (!(musb
->ops
->quirks
& MUSB_PRESERVE_SESSION
))
2829 musb_writeb(musb
->mregs
, MUSB_DEVCTL
, 0);
2831 WARN_ON(!list_empty(&musb
->pending_list
));
2833 spin_lock_irqsave(&musb
->lock
, flags
);
2835 if (is_peripheral_active(musb
)) {
2836 /* FIXME force disconnect unless we know USB will wake
2837 * the system up quickly enough to respond ...
2839 } else if (is_host_active(musb
)) {
2840 /* we know all the children are suspended; sometimes
2841 * they will even be wakeup-enabled.
2845 musb_save_context(musb
);
2847 spin_unlock_irqrestore(&musb
->lock
, flags
);
2851 static int musb_resume(struct device
*dev
)
2853 struct musb
*musb
= dev_to_musb(dev
);
2854 unsigned long flags
;
2860 * For static cmos like DaVinci, register values were preserved
2861 * unless for some reason the whole soc powered down or the USB
2862 * module got reset through the PSC (vs just being disabled).
2864 * For the DSPS glue layer though, a full register restore has to
2865 * be done. As it shouldn't harm other platforms, we do it
2869 musb_restore_context(musb
);
2871 devctl
= musb_readb(musb
->mregs
, MUSB_DEVCTL
);
2872 mask
= MUSB_DEVCTL_BDEVICE
| MUSB_DEVCTL_FSDEV
| MUSB_DEVCTL_LSDEV
;
2873 if ((devctl
& mask
) != (musb
->context
.devctl
& mask
))
2874 musb
->port1_status
= 0;
2876 musb_enable_interrupts(musb
);
2877 musb_platform_enable(musb
);
2879 /* session might be disabled in suspend */
2880 if (musb
->port_mode
== MUSB_HOST
&&
2881 !(musb
->ops
->quirks
& MUSB_PRESERVE_SESSION
)) {
2882 devctl
|= MUSB_DEVCTL_SESSION
;
2883 musb_writeb(musb
->mregs
, MUSB_DEVCTL
, devctl
);
2886 spin_lock_irqsave(&musb
->lock
, flags
);
2887 error
= musb_run_resume_work(musb
);
2889 dev_err(musb
->controller
, "resume work failed with %i\n",
2891 spin_unlock_irqrestore(&musb
->lock
, flags
);
2893 pm_runtime_mark_last_busy(dev
);
2894 pm_runtime_put_autosuspend(dev
);
2899 static int musb_runtime_suspend(struct device
*dev
)
2901 struct musb
*musb
= dev_to_musb(dev
);
2903 musb_save_context(musb
);
2904 musb
->is_runtime_suspended
= 1;
2909 static int musb_runtime_resume(struct device
*dev
)
2911 struct musb
*musb
= dev_to_musb(dev
);
2912 unsigned long flags
;
2916 * When pm_runtime_get_sync called for the first time in driver
2917 * init, some of the structure is still not initialized which is
2918 * used in restore function. But clock needs to be
2919 * enabled before any register access, so
2920 * pm_runtime_get_sync has to be called.
2921 * Also context restore without save does not make
2924 if (!musb
->is_initialized
)
2927 musb_restore_context(musb
);
2929 spin_lock_irqsave(&musb
->lock
, flags
);
2930 error
= musb_run_resume_work(musb
);
2932 dev_err(musb
->controller
, "resume work failed with %i\n",
2934 musb
->is_runtime_suspended
= 0;
2935 spin_unlock_irqrestore(&musb
->lock
, flags
);
2940 static const struct dev_pm_ops musb_dev_pm_ops
= {
2941 .suspend
= musb_suspend
,
2942 .resume
= musb_resume
,
2943 .runtime_suspend
= musb_runtime_suspend
,
2944 .runtime_resume
= musb_runtime_resume
,
2947 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2949 #define MUSB_DEV_PM_OPS NULL
2952 static struct platform_driver musb_driver
= {
2954 .name
= musb_driver_name
,
2955 .bus
= &platform_bus_type
,
2956 .pm
= MUSB_DEV_PM_OPS
,
2957 .dev_groups
= musb_groups
,
2959 .probe
= musb_probe
,
2960 .remove
= musb_remove
,
2963 module_platform_driver(musb_driver
);