drivers/usb/musb/musb_core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * MUSB OTG driver core code
5 * Copyright 2005 Mentor Graphics Corporation
6 * Copyright (C) 2005-2006 by Texas Instruments
7 * Copyright (C) 2006-2007 Nokia Corporation
8 */
11 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
13 * This consists of a Host Controller Driver (HCD) and a peripheral
14 * controller driver implementing the "Gadget" API; OTG support is
15 * in the works. These are normal Linux-USB controller drivers which
16 * use IRQs and have no dedicated thread.
18 * This version of the driver has only been used with products from
19 * Texas Instruments. Those products integrate the Inventra logic
20 * with other DMA, IRQ, and bus modules, as well as other logic that
21 * needs to be reflected in this driver.
24 * NOTE: the original Mentor code here was pretty much a collection
25 * of mechanisms that don't seem to have been fully integrated/working
26 * for *any* Linux kernel version. This version aims at Linux 2.6.now,
27 * for *any* Linux kernel version. Key open issues include:
29 * - Lack of host-side transaction scheduling, for all transfer types.
30 * The hardware doesn't do it; instead, software must.
32 * This is not an issue for OTG devices that don't support external
33 * hubs, but for more "normal" USB hosts it's a user issue that the
34 * "multipoint" support doesn't scale in the expected ways. That
35 * includes DaVinci EVM in a common non-OTG mode.
37 * * Control and bulk use dedicated endpoints, and there's as
38 * yet no mechanism to either (a) reclaim the hardware when
39 * peripherals are NAKing, which gets complicated with bulk
40 * endpoints, or (b) use more than a single bulk endpoint in
41 * each direction.
43 * RESULT: one device may be perceived as blocking another one.
45 * * Interrupt and isochronous will dynamically allocate endpoint
46 * hardware, but (a) there's no record keeping for bandwidth;
47 * (b) in the common case that few endpoints are available, there
48 * is no mechanism to reuse endpoints to talk to multiple devices.
50 * RESULT: At one extreme, bandwidth can be overcommitted in
51 * some hardware configurations, yet no faults will be reported.
52 * At the other extreme, the bandwidth capabilities which do
53 * exist tend to be severely undercommitted. You can't yet hook
54 * up both a keyboard and a mouse to an external USB hub.
58 * This gets many kinds of configuration information:
59 * - Kconfig for everything user-configurable
60 * - platform_device for addressing, irq, and platform_data
61 * - platform_data is mostly for board-specific information
62 * (plus recently, SOC or family details)
64 * Most of the conditional compilation will (someday) vanish.
67 #include <linux/module.h>
68 #include <linux/kernel.h>
69 #include <linux/sched.h>
70 #include <linux/slab.h>
71 #include <linux/list.h>
72 #include <linux/kobject.h>
73 #include <linux/prefetch.h>
74 #include <linux/platform_device.h>
75 #include <linux/io.h>
76 #include <linux/dma-mapping.h>
77 #include <linux/usb.h>
78 #include <linux/usb/of.h>
80 #include "musb_core.h"
81 #include "musb_trace.h"
83 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
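/* Editorial note: max_t() enforces a floor here, so a board-supplied
 * a_wait_bcon shorter than OTG_TIME_A_WAIT_BCON is silently raised to at
 * least that default before it is used as a timeout.
 */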
86 #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
87 #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
89 #define MUSB_VERSION "6.0"
91 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
93 #define MUSB_DRIVER_NAME "musb-hdrc"
94 const char musb_driver_name[] = MUSB_DRIVER_NAME;
96 MODULE_DESCRIPTION(DRIVER_INFO);
97 MODULE_AUTHOR(DRIVER_AUTHOR);
98 MODULE_LICENSE("GPL");
99 MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
102 /*-------------------------------------------------------------------------*/
104 static inline struct musb *dev_to_musb(struct device *dev)
106 return dev_get_drvdata(dev);
109 enum musb_mode musb_get_mode(struct device *dev)
111 enum usb_dr_mode mode;
113 mode = usb_get_dr_mode(dev);
114 switch (mode) {
115 case USB_DR_MODE_HOST:
116 return MUSB_HOST;
117 case USB_DR_MODE_PERIPHERAL:
118 return MUSB_PERIPHERAL;
119 case USB_DR_MODE_OTG:
120 case USB_DR_MODE_UNKNOWN:
121 default:
122 return MUSB_OTG;
125 EXPORT_SYMBOL_GPL(musb_get_mode);
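/* Editorial example: with a devicetree node that sets dr_mode = "peripheral",
 * usb_get_dr_mode() returns USB_DR_MODE_PERIPHERAL and this helper maps it to
 * MUSB_PERIPHERAL; a missing or unrecognized property falls back to MUSB_OTG.
 */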
127 /*-------------------------------------------------------------------------*/
129 #ifndef CONFIG_BLACKFIN
130 static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
132 void __iomem *addr = phy->io_priv;
133 int i = 0;
134 u8 r;
135 u8 power;
136 int ret;
138 pm_runtime_get_sync(phy->io_dev);
140 /* Make sure the transceiver is not in low power mode */
141 power = musb_readb(addr, MUSB_POWER);
142 power &= ~MUSB_POWER_SUSPENDM;
143 musb_writeb(addr, MUSB_POWER, power);
145 /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
146 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
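	/* ULPI indirect read handshake, as implemented below: latch the target
	 * register address, request a read via REG_REQ | RDN_WR, poll for
	 * REG_CMPLT (giving up after 10000 iterations with -ETIMEDOUT),
	 * acknowledge completion by clearing the bit, then fetch the value
	 * from MUSB_ULPI_REG_DATA.
	 */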
149 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
150 musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
151 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
153 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
154 & MUSB_ULPI_REG_CMPLT)) {
155 i++;
156 if (i == 10000) {
157 ret = -ETIMEDOUT;
158 goto out;
162 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
163 r &= ~MUSB_ULPI_REG_CMPLT;
164 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
166 ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
168 out:
169 pm_runtime_put(phy->io_dev);
171 return ret;
174 static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
176 void __iomem *addr = phy->io_priv;
177 int i = 0;
178 u8 r = 0;
179 u8 power;
180 int ret = 0;
182 pm_runtime_get_sync(phy->io_dev);
184 /* Make sure the transceiver is not in low power mode */
185 power = musb_readb(addr, MUSB_POWER);
186 power &= ~MUSB_POWER_SUSPENDM;
187 musb_writeb(addr, MUSB_POWER, power);
189 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
190 musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
191 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
193 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
194 & MUSB_ULPI_REG_CMPLT)) {
195 i++;
196 if (i == 10000) {
197 ret = -ETIMEDOUT;
198 goto out;
202 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
203 r &= ~MUSB_ULPI_REG_CMPLT;
204 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
206 out:
207 pm_runtime_put(phy->io_dev);
209 return ret;
211 #else
212 #define musb_ulpi_read NULL
213 #define musb_ulpi_write NULL
214 #endif
216 static struct usb_phy_io_ops musb_ulpi_access = {
217 .read = musb_ulpi_read,
218 .write = musb_ulpi_write,
221 /*-------------------------------------------------------------------------*/
223 static u32 musb_default_fifo_offset(u8 epnum)
225 return 0x20 + (epnum * 4);
228 /* "flat" mapping: each endpoint has its own i/o address */
229 static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
233 static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
235 return 0x100 + (0x10 * epnum) + offset;
238 /* "indexed" mapping: INDEX register controls register bank select */
239 static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
241 musb_writeb(mbase, MUSB_INDEX, epnum);
244 static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
246 return 0x10 + offset;
249 static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
251 return 0x80 + (0x08 * epnum) + offset;
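/* Editorial note on the addressing schemes above: with the default (flat)
 * map, endpoint N's control/status block lives at 0x100 + 0x10 * N and its
 * FIFO at 0x20 + 4 * N, e.g. 0x120 and 0x28 for endpoint 2.  With the
 * indexed map, MUSB_INDEX selects the endpoint and the same registers are
 * then reached through the single window at 0x10..0x1f.  The default
 * per-endpoint bus control registers sit at 0x80 + 8 * N.
 */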
254 static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
256 u8 data = __raw_readb(addr + offset);
258 trace_musb_readb(__builtin_return_address(0), addr, offset, data);
259 return data;
262 static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
264 trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
265 __raw_writeb(data, addr + offset);
268 static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
270 u16 data = __raw_readw(addr + offset);
272 trace_musb_readw(__builtin_return_address(0), addr, offset, data);
273 return data;
276 static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
278 trace_musb_writew(__builtin_return_address(0), addr, offset, data);
279 __raw_writew(data, addr + offset);
282 static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
284 u32 data = __raw_readl(addr + offset);
286 trace_musb_readl(__builtin_return_address(0), addr, offset, data);
287 return data;
290 static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
292 trace_musb_writel(__builtin_return_address(0), addr, offset, data);
293 __raw_writel(data, addr + offset);
297 * Load an endpoint's FIFO
299 static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
300 const u8 *src)
302 struct musb *musb = hw_ep->musb;
303 void __iomem *fifo = hw_ep->fifo;
305 if (unlikely(len == 0))
306 return;
308 prefetch((u8 *)src);
310 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
311 'T', hw_ep->epnum, fifo, len, src);
313 /* we can't assume unaligned reads work */
314 if (likely((0x01 & (unsigned long) src) == 0)) {
315 u16 index = 0;
317 /* best case is 32bit-aligned source address */
318 if ((0x02 & (unsigned long) src) == 0) {
319 if (len >= 4) {
320 iowrite32_rep(fifo, src + index, len >> 2);
321 index += len & ~0x03;
323 if (len & 0x02) {
324 __raw_writew(*(u16 *)&src[index], fifo);
325 index += 2;
327 } else {
328 if (len >= 2) {
329 iowrite16_rep(fifo, src + index, len >> 1);
330 index += len & ~0x01;
333 if (len & 0x01)
334 __raw_writeb(src[index], fifo);
335 } else {
336 /* byte aligned */
337 iowrite8_rep(fifo, src, len);
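/* Editorial example of the alignment cascade above: a 7-byte buffer at a
 * 32-bit-aligned address is pushed as one 32-bit word, then one 16-bit word,
 * then a single byte; a buffer at an odd address falls through to the
 * byte-at-a-time iowrite8_rep() path for the whole length.
 */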
342 * Unload an endpoint's FIFO
344 static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
346 struct musb *musb = hw_ep->musb;
347 void __iomem *fifo = hw_ep->fifo;
349 if (unlikely(len == 0))
350 return;
352 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
353 'R', hw_ep->epnum, fifo, len, dst);
355 /* we can't assume unaligned writes work */
356 if (likely((0x01 & (unsigned long) dst) == 0)) {
357 u16 index = 0;
359 /* best case is 32bit-aligned destination address */
360 if ((0x02 & (unsigned long) dst) == 0) {
361 if (len >= 4) {
362 ioread32_rep(fifo, dst, len >> 2);
363 index = len & ~0x03;
365 if (len & 0x02) {
366 *(u16 *)&dst[index] = __raw_readw(fifo);
367 index += 2;
369 } else {
370 if (len >= 2) {
371 ioread16_rep(fifo, dst, len >> 1);
372 index = len & ~0x01;
375 if (len & 0x01)
376 dst[index] = __raw_readb(fifo);
377 } else {
378 /* byte aligned */
379 ioread8_rep(fifo, dst, len);
384 * Old style IO functions
386 u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
387 EXPORT_SYMBOL_GPL(musb_readb);
389 void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
390 EXPORT_SYMBOL_GPL(musb_writeb);
392 u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
393 EXPORT_SYMBOL_GPL(musb_readw);
395 void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
396 EXPORT_SYMBOL_GPL(musb_writew);
398 u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
399 EXPORT_SYMBOL_GPL(musb_readl);
401 void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
402 EXPORT_SYMBOL_GPL(musb_writel);
404 #ifndef CONFIG_MUSB_PIO_ONLY
405 struct dma_controller *
406 (*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
407 EXPORT_SYMBOL(musb_dma_controller_create);
409 void (*musb_dma_controller_destroy)(struct dma_controller *c);
410 EXPORT_SYMBOL(musb_dma_controller_destroy);
411 #endif
414 * New style IO functions
416 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
418 return hw_ep->musb->io.read_fifo(hw_ep, len, dst);
421 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
423 return hw_ep->musb->io.write_fifo(hw_ep, len, src);
426 /*-------------------------------------------------------------------------*/
428 /* for high speed test mode; see USB 2.0 spec 7.1.20 */
429 static const u8 musb_test_packet[53] = {
430 /* implicit SYNC then DATA0 to start */
432 /* JKJKJKJK x9 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
434 /* JJKKJJKK x8 */
435 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
436 /* JJJJKKKK x8 */
437 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
438 /* JJJJJJJKKKKKKK x8 */
439 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
440 /* JJJJJJJK x8 */
441 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
442 /* JKKKKKKK x10, JK */
443 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
445 /* implicit CRC16 then EOP to end */
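/* Editorial note: the 53 bytes above are the fixed TEST_PACKET payload from
 * USB 2.0 section 7.1.20; the controller adds the SYNC at the start and the
 * CRC16/EOP at the end, which is why they are "implicit" in the comments
 * above.
 */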
448 void musb_load_testpacket(struct musb *musb)
450 void __iomem *regs = musb->endpoints[0].regs;
452 musb_ep_select(musb->mregs, 0);
453 musb_write_fifo(musb->control_ep,
454 sizeof(musb_test_packet), musb_test_packet);
455 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
458 /*-------------------------------------------------------------------------*/
461 * Handles OTG hnp timeouts, such as b_ase0_brst
463 static void musb_otg_timer_func(struct timer_list *t)
465 struct musb *musb = from_timer(musb, t, otg_timer);
466 unsigned long flags;
468 spin_lock_irqsave(&musb->lock, flags);
469 switch (musb->xceiv->otg->state) {
470 case OTG_STATE_B_WAIT_ACON:
471 musb_dbg(musb,
472 "HNP: b_wait_acon timeout; back to b_peripheral");
473 musb_g_disconnect(musb);
474 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
475 musb->is_active = 0;
476 break;
477 case OTG_STATE_A_SUSPEND:
478 case OTG_STATE_A_WAIT_BCON:
479 musb_dbg(musb, "HNP: %s timeout",
480 usb_otg_state_string(musb->xceiv->otg->state));
481 musb_platform_set_vbus(musb, 0);
482 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
483 break;
484 default:
485 musb_dbg(musb, "HNP: Unhandled mode %s",
486 usb_otg_state_string(musb->xceiv->otg->state));
488 spin_unlock_irqrestore(&musb->lock, flags);
492 * Stops the HNP transition. Caller must take care of locking.
494 void musb_hnp_stop(struct musb *musb)
496 struct usb_hcd *hcd = musb->hcd;
497 void __iomem *mbase = musb->mregs;
498 u8 reg;
500 musb_dbg(musb, "HNP: stop from %s",
501 usb_otg_state_string(musb->xceiv->otg->state));
503 switch (musb->xceiv->otg->state) {
504 case OTG_STATE_A_PERIPHERAL:
505 musb_g_disconnect(musb);
506 musb_dbg(musb, "HNP: back to %s",
507 usb_otg_state_string(musb->xceiv->otg->state));
508 break;
509 case OTG_STATE_B_HOST:
510 musb_dbg(musb, "HNP: Disabling HR");
511 if (hcd)
512 hcd->self.is_b_host = 0;
513 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
514 MUSB_DEV_MODE(musb);
515 reg = musb_readb(mbase, MUSB_POWER);
516 reg |= MUSB_POWER_SUSPENDM;
517 musb_writeb(mbase, MUSB_POWER, reg);
518 /* REVISIT: Start SESSION_REQUEST here? */
519 break;
520 default:
521 musb_dbg(musb, "HNP: Stopping in unknown state %s",
522 usb_otg_state_string(musb->xceiv->otg->state));
526 * When returning to A state after HNP, avoid hub_port_rebounce(),
527 * which causes occasional OPT A "Did not receive reset after connect"
528 * errors.
530 musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
533 static void musb_recover_from_babble(struct musb *musb);
536 * Interrupt Service Routine to record USB "global" interrupts.
537 * Since these do not happen often and signify things of
538 * paramount importance, it seems OK to check them individually;
539 * the order of the tests is specified in the manual
541 * @param musb instance pointer
542 * @param int_usb register contents
543 * @param devctl DEVCTL register contents
547 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
548 u8 devctl)
550 irqreturn_t handled = IRQ_NONE;
552 musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
554 /* in host mode, the peripheral may issue remote wakeup.
555 * in peripheral mode, the host may resume the link.
556 * spurious RESUME irqs happen too, paired with SUSPEND.
558 if (int_usb & MUSB_INTR_RESUME) {
559 handled = IRQ_HANDLED;
560 musb_dbg(musb, "RESUME (%s)",
561 usb_otg_state_string(musb->xceiv->otg->state));
563 if (devctl & MUSB_DEVCTL_HM) {
564 switch (musb->xceiv->otg->state) {
565 case OTG_STATE_A_SUSPEND:
566 /* remote wakeup? */
567 musb->port1_status |=
568 (USB_PORT_STAT_C_SUSPEND << 16)
569 | MUSB_PORT_STAT_RESUME;
570 musb->rh_timer = jiffies
571 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
572 musb->xceiv->otg->state = OTG_STATE_A_HOST;
573 musb->is_active = 1;
574 musb_host_resume_root_hub(musb);
575 schedule_delayed_work(&musb->finish_resume_work,
576 msecs_to_jiffies(USB_RESUME_TIMEOUT));
577 break;
578 case OTG_STATE_B_WAIT_ACON:
579 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
580 musb->is_active = 1;
581 MUSB_DEV_MODE(musb);
582 break;
583 default:
584 WARNING("bogus %s RESUME (%s)\n",
585 "host",
586 usb_otg_state_string(musb->xceiv->otg->state));
588 } else {
589 switch (musb->xceiv->otg->state) {
590 case OTG_STATE_A_SUSPEND:
591 /* possibly DISCONNECT is upcoming */
592 musb->xceiv->otg->state = OTG_STATE_A_HOST;
593 musb_host_resume_root_hub(musb);
594 break;
595 case OTG_STATE_B_WAIT_ACON:
596 case OTG_STATE_B_PERIPHERAL:
597 /* disconnect while suspended? we may
598 * not get a disconnect irq...
600 if ((devctl & MUSB_DEVCTL_VBUS)
601 != (3 << MUSB_DEVCTL_VBUS_SHIFT)
603 musb->int_usb |= MUSB_INTR_DISCONNECT;
604 musb->int_usb &= ~MUSB_INTR_SUSPEND;
605 break;
607 musb_g_resume(musb);
608 break;
609 case OTG_STATE_B_IDLE:
610 musb->int_usb &= ~MUSB_INTR_SUSPEND;
611 break;
612 default:
613 WARNING("bogus %s RESUME (%s)\n",
614 "peripheral",
615 usb_otg_state_string(musb->xceiv->otg->state));
620 /* see manual for the order of the tests */
621 if (int_usb & MUSB_INTR_SESSREQ) {
622 void __iomem *mbase = musb->mregs;
624 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
625 && (devctl & MUSB_DEVCTL_BDEVICE)) {
626 musb_dbg(musb, "SessReq while on B state");
627 return IRQ_HANDLED;
630 musb_dbg(musb, "SESSION_REQUEST (%s)",
631 usb_otg_state_string(musb->xceiv->otg->state));
633 /* IRQ arrives from ID pin sense or (later, if VBUS power
634 * is removed) SRP. responses are time critical:
635 * - turn on VBUS (with silicon-specific mechanism)
636 * - go through A_WAIT_VRISE
637 * - ... to A_WAIT_BCON.
638 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
640 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
641 musb->ep0_stage = MUSB_EP0_START;
642 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
643 MUSB_HST_MODE(musb);
644 musb_platform_set_vbus(musb, 1);
646 handled = IRQ_HANDLED;
649 if (int_usb & MUSB_INTR_VBUSERROR) {
650 int ignore = 0;
652 /* During connection as an A-Device, we may see short
653 * current spikes causing voltage drop, because of cable
654 * and peripheral capacitance combined with vbus draw.
655 * (So: less common with truly self-powered devices, where
656 * vbus doesn't act like a power supply.)
658 * Such spikes are short; usually less than ~500 usec, max
659 * of ~2 msec. That is, they're not sustained overcurrent
660 * errors, though they're reported using VBUSERROR irqs.
662 * Workarounds: (a) hardware: use self powered devices.
663 * (b) software: ignore non-repeated VBUS errors.
665 * REVISIT: do delays from lots of DEBUG_KERNEL checks
666 * make trouble here, keeping VBUS < 4.4V ?
668 switch (musb->xceiv->otg->state) {
669 case OTG_STATE_A_HOST:
670 /* recovery is dicey once we've gotten past the
671 * initial stages of enumeration, but if VBUS
672 * stayed ok at the other end of the link, and
673 * another reset is due (at least for high speed,
674 * to redo the chirp etc), it might work OK...
676 case OTG_STATE_A_WAIT_BCON:
677 case OTG_STATE_A_WAIT_VRISE:
678 if (musb->vbuserr_retry) {
679 void __iomem *mbase = musb->mregs;
681 musb->vbuserr_retry--;
682 ignore = 1;
683 devctl |= MUSB_DEVCTL_SESSION;
684 musb_writeb(mbase, MUSB_DEVCTL, devctl);
685 } else {
686 musb->port1_status |=
687 USB_PORT_STAT_OVERCURRENT
688 | (USB_PORT_STAT_C_OVERCURRENT << 16);
690 break;
691 default:
692 break;
695 dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
696 "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
697 usb_otg_state_string(musb->xceiv->otg->state),
698 devctl,
699 ({ char *s;
700 switch (devctl & MUSB_DEVCTL_VBUS) {
701 case 0 << MUSB_DEVCTL_VBUS_SHIFT:
702 s = "<SessEnd"; break;
703 case 1 << MUSB_DEVCTL_VBUS_SHIFT:
704 s = "<AValid"; break;
705 case 2 << MUSB_DEVCTL_VBUS_SHIFT:
706 s = "<VBusValid"; break;
707 /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
708 default:
709 s = "VALID"; break;
710 } s; }),
711 VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
712 musb->port1_status);
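		/* Editorial note: the decode above follows the two-bit DEVCTL
		 * VBUS field (assuming the usual musb_regs.h encoding): 0 is
		 * below SessionEnd, 1 above SessionEnd but below AValid, 2
		 * above AValid but below VBusValid, and 3 above VBusValid.
		 */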
714 /* go through A_WAIT_VFALL then start a new session */
715 if (!ignore)
716 musb_platform_set_vbus(musb, 0);
717 handled = IRQ_HANDLED;
720 if (int_usb & MUSB_INTR_SUSPEND) {
721 musb_dbg(musb, "SUSPEND (%s) devctl %02x",
722 usb_otg_state_string(musb->xceiv->otg->state), devctl);
723 handled = IRQ_HANDLED;
725 switch (musb->xceiv->otg->state) {
726 case OTG_STATE_A_PERIPHERAL:
727 /* We also come here if the cable is removed, since
728 * this silicon doesn't report ID-no-longer-grounded.
730 * We depend on T(a_wait_bcon) to shut us down, and
731 * hope users don't do anything dicey during this
732 * undesired detour through A_WAIT_BCON.
734 musb_hnp_stop(musb);
735 musb_host_resume_root_hub(musb);
736 musb_root_disconnect(musb);
737 musb_platform_try_idle(musb, jiffies
738 + msecs_to_jiffies(musb->a_wait_bcon
739 ? : OTG_TIME_A_WAIT_BCON));
741 break;
742 case OTG_STATE_B_IDLE:
743 if (!musb->is_active)
744 break;
745 /* fall through */
746 case OTG_STATE_B_PERIPHERAL:
747 musb_g_suspend(musb);
748 musb->is_active = musb->g.b_hnp_enable;
749 if (musb->is_active) {
750 musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
751 musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
752 mod_timer(&musb->otg_timer, jiffies
753 + msecs_to_jiffies(
754 OTG_TIME_B_ASE0_BRST));
756 break;
757 case OTG_STATE_A_WAIT_BCON:
758 if (musb->a_wait_bcon != 0)
759 musb_platform_try_idle(musb, jiffies
760 + msecs_to_jiffies(musb->a_wait_bcon));
761 break;
762 case OTG_STATE_A_HOST:
763 musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
764 musb->is_active = musb->hcd->self.b_hnp_enable;
765 break;
766 case OTG_STATE_B_HOST:
767 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
768 musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
769 break;
770 default:
771 /* "should not happen" */
772 musb->is_active = 0;
773 break;
777 if (int_usb & MUSB_INTR_CONNECT) {
778 struct usb_hcd *hcd = musb->hcd;
780 handled = IRQ_HANDLED;
781 musb->is_active = 1;
783 musb->ep0_stage = MUSB_EP0_START;
785 musb->intrtxe = musb->epmask;
786 musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
787 musb->intrrxe = musb->epmask & 0xfffe;
788 musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
789 musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
790 musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
791 |USB_PORT_STAT_HIGH_SPEED
792 |USB_PORT_STAT_ENABLE
794 musb->port1_status |= USB_PORT_STAT_CONNECTION
795 |(USB_PORT_STAT_C_CONNECTION << 16);
797 /* high vs full speed is just a guess until after reset */
798 if (devctl & MUSB_DEVCTL_LSDEV)
799 musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
801 /* indicate new connection to OTG machine */
802 switch (musb->xceiv->otg->state) {
803 case OTG_STATE_B_PERIPHERAL:
804 if (int_usb & MUSB_INTR_SUSPEND) {
805 musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
806 int_usb &= ~MUSB_INTR_SUSPEND;
807 goto b_host;
808 } else
809 musb_dbg(musb, "CONNECT as b_peripheral???");
810 break;
811 case OTG_STATE_B_WAIT_ACON:
812 musb_dbg(musb, "HNP: CONNECT, now b_host");
813 b_host:
814 musb->xceiv->otg->state = OTG_STATE_B_HOST;
815 if (musb->hcd)
816 musb->hcd->self.is_b_host = 1;
817 del_timer(&musb->otg_timer);
818 break;
819 default:
820 if ((devctl & MUSB_DEVCTL_VBUS)
821 == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
822 musb->xceiv->otg->state = OTG_STATE_A_HOST;
823 if (hcd)
824 hcd->self.is_b_host = 0;
826 break;
829 musb_host_poke_root_hub(musb);
831 musb_dbg(musb, "CONNECT (%s) devctl %02x",
832 usb_otg_state_string(musb->xceiv->otg->state), devctl);
835 if (int_usb & MUSB_INTR_DISCONNECT) {
836 musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
837 usb_otg_state_string(musb->xceiv->otg->state),
838 MUSB_MODE(musb), devctl);
839 handled = IRQ_HANDLED;
841 switch (musb->xceiv->otg->state) {
842 case OTG_STATE_A_HOST:
843 case OTG_STATE_A_SUSPEND:
844 musb_host_resume_root_hub(musb);
845 musb_root_disconnect(musb);
846 if (musb->a_wait_bcon != 0)
847 musb_platform_try_idle(musb, jiffies
848 + msecs_to_jiffies(musb->a_wait_bcon));
849 break;
850 case OTG_STATE_B_HOST:
851 /* REVISIT this behaves for "real disconnect"
852 * cases; make sure the other transitions from
853 * from B_HOST act right too. The B_HOST code
854 * in hnp_stop() is currently not used...
856 musb_root_disconnect(musb);
857 if (musb->hcd)
858 musb->hcd->self.is_b_host = 0;
859 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
860 MUSB_DEV_MODE(musb);
861 musb_g_disconnect(musb);
862 break;
863 case OTG_STATE_A_PERIPHERAL:
864 musb_hnp_stop(musb);
865 musb_root_disconnect(musb);
866 /* FALLTHROUGH */
867 case OTG_STATE_B_WAIT_ACON:
868 /* FALLTHROUGH */
869 case OTG_STATE_B_PERIPHERAL:
870 case OTG_STATE_B_IDLE:
871 musb_g_disconnect(musb);
872 break;
873 default:
874 WARNING("unhandled DISCONNECT transition (%s)\n",
875 usb_otg_state_string(musb->xceiv->otg->state));
876 break;
880 /* mentor saves a bit: bus reset and babble share the same irq.
881 * only host sees babble; only peripheral sees bus reset.
883 if (int_usb & MUSB_INTR_RESET) {
884 handled = IRQ_HANDLED;
885 if (is_host_active(musb)) {
887 * When BABBLE happens, what we can do depends on which
888 * platform MUSB is running on, because some platforms
889 * implemented proprietary means for 'recovering' from
890 * Babble conditions. One such platform is AM335x. In
891 * most cases, however, the only thing we can do is
892 * drop the session.
894 dev_err(musb->controller, "Babble\n");
895 musb_recover_from_babble(musb);
896 } else {
897 musb_dbg(musb, "BUS RESET as %s",
898 usb_otg_state_string(musb->xceiv->otg->state));
899 switch (musb->xceiv->otg->state) {
900 case OTG_STATE_A_SUSPEND:
901 musb_g_reset(musb);
902 /* FALLTHROUGH */
903 case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
904 /* never use invalid T(a_wait_bcon) */
905 musb_dbg(musb, "HNP: in %s, %d msec timeout",
906 usb_otg_state_string(musb->xceiv->otg->state),
907 TA_WAIT_BCON(musb));
908 mod_timer(&musb->otg_timer, jiffies
909 + msecs_to_jiffies(TA_WAIT_BCON(musb)));
910 break;
911 case OTG_STATE_A_PERIPHERAL:
912 del_timer(&musb->otg_timer);
913 musb_g_reset(musb);
914 break;
915 case OTG_STATE_B_WAIT_ACON:
916 musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
917 usb_otg_state_string(musb->xceiv->otg->state));
918 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
919 musb_g_reset(musb);
920 break;
921 case OTG_STATE_B_IDLE:
922 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
923 /* FALLTHROUGH */
924 case OTG_STATE_B_PERIPHERAL:
925 musb_g_reset(musb);
926 break;
927 default:
928 musb_dbg(musb, "Unhandled BUS RESET as %s",
929 usb_otg_state_string(musb->xceiv->otg->state));
934 #if 0
935 /* REVISIT ... this would be for multiplexing periodic endpoints, or
936 * supporting transfer phasing to prevent exceeding ISO bandwidth
937 * limits of a given frame or microframe.
939 * It's not needed for peripheral side, which dedicates endpoints;
940 * though it _might_ use SOF irqs for other purposes.
942 * And it's not currently needed for host side, which also dedicates
943 * endpoints, relies on TX/RX interval registers, and isn't claimed
944 * to support ISO transfers yet.
946 if (int_usb & MUSB_INTR_SOF) {
947 void __iomem *mbase = musb->mregs;
948 struct musb_hw_ep *ep;
949 u8 epnum;
950 u16 frame;
952 dev_dbg(musb->controller, "START_OF_FRAME\n");
953 handled = IRQ_HANDLED;
955 /* start any periodic Tx transfers waiting for current frame */
956 frame = musb_readw(mbase, MUSB_FRAME);
957 ep = musb->endpoints;
958 for (epnum = 1; (epnum < musb->nr_endpoints)
959 && (musb->epmask >= (1 << epnum));
960 epnum++, ep++) {
962 * FIXME handle framecounter wraps (12 bits)
963 * eliminate duplicated StartUrb logic
965 if (ep->dwWaitFrame >= frame) {
966 ep->dwWaitFrame = 0;
967 pr_debug("SOF --> periodic TX%s on %d\n",
968 ep->tx_channel ? " DMA" : "",
969 epnum);
970 if (!ep->tx_channel)
971 musb_h_tx_start(musb, epnum);
972 else
973 cppi_hostdma_start(musb, epnum);
975 } /* end of for loop */
977 #endif
979 schedule_delayed_work(&musb->irq_work, 0);
981 return handled;
984 /*-------------------------------------------------------------------------*/
986 static void musb_disable_interrupts(struct musb *musb)
988 void __iomem *mbase = musb->mregs;
989 u16 temp;
991 /* disable interrupts */
992 musb_writeb(mbase, MUSB_INTRUSBE, 0);
993 musb->intrtxe = 0;
994 musb_writew(mbase, MUSB_INTRTXE, 0);
995 musb->intrrxe = 0;
996 musb_writew(mbase, MUSB_INTRRXE, 0);
998 /* flush pending interrupts */
999 temp = musb_readb(mbase, MUSB_INTRUSB);
1000 temp = musb_readw(mbase, MUSB_INTRTX);
1001 temp = musb_readw(mbase, MUSB_INTRRX);
1004 static void musb_enable_interrupts(struct musb *musb)
1006 void __iomem *regs = musb->mregs;
1008 /* Set INT enable registers, enable interrupts */
1009 musb->intrtxe = musb->epmask;
1010 musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
1011 musb->intrrxe = musb->epmask & 0xfffe;
1012 musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
1013 musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
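	/* Editorial note: INTRRXE masks bit 0 because endpoint 0 has no RX
	 * interrupt of its own - its events arrive through bit 0 of INTRTX.
	 * Per the MUSB_INTR_* definitions, 0xf7 in INTRUSBE enables every
	 * USB-level interrupt except SOF (bit 3), which this driver leaves
	 * unused (see the #if 0 SOF block in musb_stage0_irq()).
	 */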
1018 * Program the HDRC to start (enable interrupts, dma, etc.).
1020 void musb_start(struct musb *musb)
1022 void __iomem *regs = musb->mregs;
1023 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
1024 u8 power;
1026 musb_dbg(musb, "<== devctl %02x", devctl);
1028 musb_enable_interrupts(musb);
1029 musb_writeb(regs, MUSB_TESTMODE, 0);
1031 power = MUSB_POWER_ISOUPDATE;
1033 * treating UNKNOWN as unspecified maximum speed, in which case
1034 * we will default to high-speed.
1036 if (musb->config->maximum_speed == USB_SPEED_HIGH ||
1037 musb->config->maximum_speed == USB_SPEED_UNKNOWN)
1038 power |= MUSB_POWER_HSENAB;
1039 musb_writeb(regs, MUSB_POWER, power);
1041 musb->is_active = 0;
1042 devctl = musb_readb(regs, MUSB_DEVCTL);
1043 devctl &= ~MUSB_DEVCTL_SESSION;
1045 /* session started after:
1046 * (a) ID-grounded irq, host mode;
1047 * (b) vbus present/connect IRQ, peripheral mode;
1048 * (c) peripheral initiates, using SRP
1050 if (musb->port_mode != MUSB_PORT_MODE_HOST &&
1051 musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
1052 (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
1053 musb->is_active = 1;
1054 } else {
1055 devctl |= MUSB_DEVCTL_SESSION;
1058 musb_platform_enable(musb);
1059 musb_writeb(regs, MUSB_DEVCTL, devctl);
1063 * Make the HDRC stop (disable interrupts, etc.);
1064 * reversible by musb_start
1065 * called on gadget driver unregister
1066 * with controller locked, irqs blocked
1067 * acts as a NOP unless some role activated the hardware
1069 void musb_stop(struct musb *musb)
1071 /* stop IRQs, timers, ... */
1072 musb_platform_disable(musb);
1073 musb_disable_interrupts(musb);
1074 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1076 /* FIXME
1077 * - mark host and/or peripheral drivers unusable/inactive
1078 * - disable DMA (and enable it in HdrcStart)
1079 * - make sure we can musb_start() after musb_stop(); with
1080 * OTG mode, gadget driver module rmmod/modprobe cycles that
1081 * - ...
1083 musb_platform_try_idle(musb, 0);
1086 /*-------------------------------------------------------------------------*/
1089 * The silicon either has hard-wired endpoint configurations, or else
1090 * "dynamic fifo" sizing. The driver has support for both, though at this
1091 * writing only the dynamic sizing is very well tested. Since we switched
1092 * away from compile-time hardware parameters, we can no longer rely on
1093 * dead code elimination to leave only the relevant one in the object file.
1095 * We don't currently use dynamic fifo setup capability to do anything
1096 * more than selecting one of a bunch of predefined configurations.
1098 static ushort fifo_mode;
1100 /* "modprobe ... fifo_mode=1" etc */
1101 module_param(fifo_mode, ushort, 0);
1102 MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
1105 * tables defining fifo_mode values. define more if you like.
1106 * for host side, make sure both halves of ep1 are set up.
1109 /* mode 0 - fits in 2KB */
1110 static struct musb_fifo_cfg mode_0_cfg[] = {
1111 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1112 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1113 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
1114 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1115 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1118 /* mode 1 - fits in 4KB */
1119 static struct musb_fifo_cfg mode_1_cfg[] = {
1120 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1121 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1122 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1123 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1124 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1127 /* mode 2 - fits in 4KB */
1128 static struct musb_fifo_cfg mode_2_cfg[] = {
1129 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1130 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1131 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1132 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1133 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
1134 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
1137 /* mode 3 - fits in 4KB */
1138 static struct musb_fifo_cfg mode_3_cfg[] = {
1139 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1140 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1141 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1142 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1143 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1144 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1147 /* mode 4 - fits in 16KB */
1148 static struct musb_fifo_cfg mode_4_cfg[] = {
1149 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1150 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1151 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1152 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1153 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1154 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1155 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1156 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1157 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1158 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1159 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
1160 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
1161 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
1162 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
1163 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
1164 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
1165 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
1166 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
1167 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
1168 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
1169 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
1170 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
1171 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
1172 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
1173 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
1174 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1175 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1178 /* mode 5 - fits in 8KB */
1179 static struct musb_fifo_cfg mode_5_cfg[] = {
1180 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1181 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1182 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1183 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1184 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1185 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1186 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1187 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1188 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1189 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1190 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
1191 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
1192 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
1193 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
1194 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
1195 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
1196 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
1197 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
1198 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
1199 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
1200 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
1201 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
1202 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
1203 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
1204 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
1205 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1206 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1210 * configure a fifo; for non-shared endpoints, this may be called
1211 * once for a tx fifo and once for an rx fifo.
1213 * returns negative errno or offset for next fifo.
1215 static int
1216 fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
1217 const struct musb_fifo_cfg *cfg, u16 offset)
1219 void __iomem *mbase = musb->mregs;
1220 int size = 0;
1221 u16 maxpacket = cfg->maxpacket;
1222 u16 c_off = offset >> 3;
1223 u8 c_size;
1225 /* expect hw_ep has already been zero-initialized */
1227 size = ffs(max(maxpacket, (u16) 8)) - 1;
1228 maxpacket = 1 << size;
1230 c_size = size - 3;
1231 if (cfg->mode == BUF_DOUBLE) {
1232 if ((offset + (maxpacket << 1)) >
1233 (1 << (musb->config->ram_bits + 2)))
1234 return -EMSGSIZE;
1235 c_size |= MUSB_FIFOSZ_DPB;
1236 } else {
1237 if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
1238 return -EMSGSIZE;
1241 /* configure the FIFO */
1242 musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
1244 /* EP0 reserved endpoint for control, bidirectional;
1245 * EP1 reserved for bulk, two unidirectional halves.
1247 if (hw_ep->epnum == 1)
1248 musb->bulk_ep = hw_ep;
1249 /* REVISIT error check: be sure ep0 can both rx and tx ... */
1250 switch (cfg->style) {
1251 case FIFO_TX:
1252 musb_write_txfifosz(mbase, c_size);
1253 musb_write_txfifoadd(mbase, c_off);
1254 hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1255 hw_ep->max_packet_sz_tx = maxpacket;
1256 break;
1257 case FIFO_RX:
1258 musb_write_rxfifosz(mbase, c_size);
1259 musb_write_rxfifoadd(mbase, c_off);
1260 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1261 hw_ep->max_packet_sz_rx = maxpacket;
1262 break;
1263 case FIFO_RXTX:
1264 musb_write_txfifosz(mbase, c_size);
1265 musb_write_txfifoadd(mbase, c_off);
1266 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1267 hw_ep->max_packet_sz_rx = maxpacket;
1269 musb_write_rxfifosz(mbase, c_size);
1270 musb_write_rxfifoadd(mbase, c_off);
1271 hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
1272 hw_ep->max_packet_sz_tx = maxpacket;
1274 hw_ep->is_shared_fifo = true;
1275 break;
1278 /* NOTE rx and tx endpoint irqs aren't managed separately,
1279 * which happens to be ok
1281 musb->epmask |= (1 << hw_ep->epnum);
1283 return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
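/* Editorial worked example for fifo_setup(): a 512-byte single-buffered
 * entry gives size = ffs(512) - 1 = 9, so maxpacket stays 512 and
 * c_size = 6, i.e. the register encodes the FIFO as 2^(c_size + 3) bytes.
 * The start address is programmed as offset >> 3 (8-byte units), and the
 * returned "next offset" advances by 512, or by 1024 when MUSB_FIFOSZ_DPB
 * marks the entry double-buffered.
 */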
1286 static struct musb_fifo_cfg ep0_cfg = {
1287 .style = FIFO_RXTX, .maxpacket = 64,
1290 static int ep_config_from_table(struct musb *musb)
1292 const struct musb_fifo_cfg *cfg;
1293 unsigned i, n;
1294 int offset;
1295 struct musb_hw_ep *hw_ep = musb->endpoints;
1297 if (musb->config->fifo_cfg) {
1298 cfg = musb->config->fifo_cfg;
1299 n = musb->config->fifo_cfg_size;
1300 goto done;
1303 switch (fifo_mode) {
1304 default:
1305 fifo_mode = 0;
1306 /* FALLTHROUGH */
1307 case 0:
1308 cfg = mode_0_cfg;
1309 n = ARRAY_SIZE(mode_0_cfg);
1310 break;
1311 case 1:
1312 cfg = mode_1_cfg;
1313 n = ARRAY_SIZE(mode_1_cfg);
1314 break;
1315 case 2:
1316 cfg = mode_2_cfg;
1317 n = ARRAY_SIZE(mode_2_cfg);
1318 break;
1319 case 3:
1320 cfg = mode_3_cfg;
1321 n = ARRAY_SIZE(mode_3_cfg);
1322 break;
1323 case 4:
1324 cfg = mode_4_cfg;
1325 n = ARRAY_SIZE(mode_4_cfg);
1326 break;
1327 case 5:
1328 cfg = mode_5_cfg;
1329 n = ARRAY_SIZE(mode_5_cfg);
1330 break;
1333 pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
1336 done:
1337 offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1338 /* assert(offset > 0) */
1340 /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
1341 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1344 for (i = 0; i < n; i++) {
1345 u8 epn = cfg->hw_ep_num;
1347 if (epn >= musb->config->num_eps) {
1348 pr_debug("%s: invalid ep %d\n",
1349 musb_driver_name, epn);
1350 return -EINVAL;
1352 offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1353 if (offset < 0) {
1354 pr_debug("%s: mem overrun, ep %d\n",
1355 musb_driver_name, epn);
1356 return offset;
1358 epn++;
1359 musb->nr_endpoints = max(epn, musb->nr_endpoints);
1362 pr_debug("%s: %d/%d max ep, %d/%d memory\n",
1363 musb_driver_name,
1364 n + 1, musb->config->num_eps * 2 - 1,
1365 offset, (1 << (musb->config->ram_bits + 2)));
1367 if (!musb->bulk_ep) {
1368 pr_debug("%s: missing bulk\n", musb_driver_name);
1369 return -EINVAL;
1372 return 0;
1377 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
1378 * @param musb the controller
1380 static int ep_config_from_hw(struct musb *musb)
1382 u8 epnum = 0;
1383 struct musb_hw_ep *hw_ep;
1384 void __iomem *mbase = musb->mregs;
1385 int ret = 0;
1387 musb_dbg(musb, "<== static silicon ep config");
1389 /* FIXME pick up ep0 maxpacket size */
1391 for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
1392 musb_ep_select(mbase, epnum);
1393 hw_ep = musb->endpoints + epnum;
1395 ret = musb_read_fifosize(musb, hw_ep, epnum);
1396 if (ret < 0)
1397 break;
1399 /* FIXME set up hw_ep->{rx,tx}_double_buffered */
1401 /* pick an RX/TX endpoint for bulk */
1402 if (hw_ep->max_packet_sz_tx < 512
1403 || hw_ep->max_packet_sz_rx < 512)
1404 continue;
1406 /* REVISIT: this algorithm is lazy, we should at least
1407 * try to pick a double buffered endpoint.
1409 if (musb->bulk_ep)
1410 continue;
1411 musb->bulk_ep = hw_ep;
1414 if (!musb->bulk_ep) {
1415 pr_debug("%s: missing bulk\n", musb_driver_name);
1416 return -EINVAL;
1419 return 0;
1422 enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1424 /* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
1425 * configure endpoints, or take their config from silicon
1427 static int musb_core_init(u16 musb_type, struct musb *musb)
1429 u8 reg;
1430 char *type;
1431 char aInfo[90];
1432 void __iomem *mbase = musb->mregs;
1433 int status = 0;
1434 int i;
1436 /* log core options (read using indexed model) */
1437 reg = musb_read_configdata(mbase);
1439 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1440 if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1441 strcat(aInfo, ", dyn FIFOs");
1442 musb->dyn_fifo = true;
1444 if (reg & MUSB_CONFIGDATA_MPRXE) {
1445 strcat(aInfo, ", bulk combine");
1446 musb->bulk_combine = true;
1448 if (reg & MUSB_CONFIGDATA_MPTXE) {
1449 strcat(aInfo, ", bulk split");
1450 musb->bulk_split = true;
1452 if (reg & MUSB_CONFIGDATA_HBRXE) {
1453 strcat(aInfo, ", HB-ISO Rx");
1454 musb->hb_iso_rx = true;
1456 if (reg & MUSB_CONFIGDATA_HBTXE) {
1457 strcat(aInfo, ", HB-ISO Tx");
1458 musb->hb_iso_tx = true;
1460 if (reg & MUSB_CONFIGDATA_SOFTCONE)
1461 strcat(aInfo, ", SoftConn");
1463 pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);
1465 if (MUSB_CONTROLLER_MHDRC == musb_type) {
1466 musb->is_multipoint = 1;
1467 type = "M";
1468 } else {
1469 musb->is_multipoint = 0;
1470 type = "";
1471 #ifndef CONFIG_USB_OTG_BLACKLIST_HUB
1472 pr_err("%s: kernel must blacklist external hubs\n",
1473 musb_driver_name);
1474 #endif
1477 /* log release info */
1478 musb->hwvers = musb_read_hwvers(mbase);
1479 pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
1480 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
1481 MUSB_HWVERS_MINOR(musb->hwvers),
1482 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
1484 /* configure ep0 */
1485 musb_configure_ep0(musb);
1487 /* discover endpoint configuration */
1488 musb->nr_endpoints = 1;
1489 musb->epmask = 1;
1491 if (musb->dyn_fifo)
1492 status = ep_config_from_table(musb);
1493 else
1494 status = ep_config_from_hw(musb);
1496 if (status < 0)
1497 return status;
1499 /* finish init, and print endpoint config */
1500 for (i = 0; i < musb->nr_endpoints; i++) {
1501 struct musb_hw_ep *hw_ep = musb->endpoints + i;
1503 hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
1504 #if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
1505 if (musb->io.quirks & MUSB_IN_TUSB) {
1506 hw_ep->fifo_async = musb->async + 0x400 +
1507 musb->io.fifo_offset(i);
1508 hw_ep->fifo_sync = musb->sync + 0x400 +
1509 musb->io.fifo_offset(i);
1510 hw_ep->fifo_sync_va =
1511 musb->sync_va + 0x400 + musb->io.fifo_offset(i);
1513 if (i == 0)
1514 hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
1515 else
1516 hw_ep->conf = mbase + 0x400 +
1517 (((i - 1) & 0xf) << 2);
1519 #endif
1521 hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
1522 hw_ep->rx_reinit = 1;
1523 hw_ep->tx_reinit = 1;
1525 if (hw_ep->max_packet_sz_tx) {
1526 musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1527 musb_driver_name, i,
1528 hw_ep->is_shared_fifo ? "shared" : "tx",
1529 hw_ep->tx_double_buffered
1530 ? "doublebuffer, " : "",
1531 hw_ep->max_packet_sz_tx);
1533 if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
1534 musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
1535 musb_driver_name, i,
1536 "rx",
1537 hw_ep->rx_double_buffered
1538 ? "doublebuffer, " : "",
1539 hw_ep->max_packet_sz_rx);
1541 if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
1542 musb_dbg(musb, "hw_ep %d not configured", i);
1545 return 0;
1548 /*-------------------------------------------------------------------------*/
1551 * handle all the irqs defined by the HDRC core. for now we expect: other
1552 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
1553 * will be assigned, and the irq will already have been acked.
1555 * called in irq context with spinlock held, irqs blocked
1557 irqreturn_t musb_interrupt(struct musb *musb)
1559 irqreturn_t retval = IRQ_NONE;
1560 unsigned long status;
1561 unsigned long epnum;
1562 u8 devctl;
1564 if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
1565 return IRQ_NONE;
1567 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1569 trace_musb_isr(musb);
1572 * According to Mentor Graphics' documentation, flowchart on page 98,
1573 * IRQ should be handled as follows:
1575 * . Resume IRQ
1576 * . Session Request IRQ
1577 * . VBUS Error IRQ
1578 * . Suspend IRQ
1579 * . Connect IRQ
1580 * . Disconnect IRQ
1581 * . Reset/Babble IRQ
1582 * . SOF IRQ (we're not using this one)
1583 * . Endpoint 0 IRQ
1584 * . TX Endpoints
1585 * . RX Endpoints
1587 * We will be following that flowchart in order to avoid any problems
1588 * that might arise with the internal finite state machine.
1591 if (musb->int_usb)
1592 retval |= musb_stage0_irq(musb, musb->int_usb, devctl);
1594 if (musb->int_tx & 1) {
1595 if (is_host_active(musb))
1596 retval |= musb_h_ep0_irq(musb);
1597 else
1598 retval |= musb_g_ep0_irq(musb);
1600 /* we have just handled endpoint 0 IRQ, clear it */
1601 musb->int_tx &= ~BIT(0);
1604 status = musb->int_tx;
1606 for_each_set_bit(epnum, &status, 16) {
1607 retval = IRQ_HANDLED;
1608 if (is_host_active(musb))
1609 musb_host_tx(musb, epnum);
1610 else
1611 musb_g_tx(musb, epnum);
1614 status = musb->int_rx;
1616 for_each_set_bit(epnum, &status, 16) {
1617 retval = IRQ_HANDLED;
1618 if (is_host_active(musb))
1619 musb_host_rx(musb, epnum);
1620 else
1621 musb_g_rx(musb, epnum);
1624 return retval;
1626 EXPORT_SYMBOL_GPL(musb_interrupt);
1628 #ifndef CONFIG_MUSB_PIO_ONLY
1629 static bool use_dma = 1;
1631 /* "modprobe ... use_dma=0" etc */
1632 module_param(use_dma, bool, 0644);
1633 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1635 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
1637 /* called with controller lock already held */
1639 if (!epnum) {
1640 if (!is_cppi_enabled(musb)) {
1641 /* endpoint 0 */
1642 if (is_host_active(musb))
1643 musb_h_ep0_irq(musb);
1644 else
1645 musb_g_ep0_irq(musb);
1647 } else {
1648 /* endpoints 1..15 */
1649 if (transmit) {
1650 if (is_host_active(musb))
1651 musb_host_tx(musb, epnum);
1652 else
1653 musb_g_tx(musb, epnum);
1654 } else {
1655 /* receive */
1656 if (is_host_active(musb))
1657 musb_host_rx(musb, epnum);
1658 else
1659 musb_g_rx(musb, epnum);
1663 EXPORT_SYMBOL_GPL(musb_dma_completion);
1665 #else
1666 #define use_dma 0
1667 #endif
1669 static int (*musb_phy_callback)(enum musb_vbus_id_status status);
1672 * musb_mailbox - optional phy notifier function
1673 * @status: phy state change
1675 * Optionally gets called from the USB PHY. Note that the USB PHY must be
1676 * disabled at the point the phy_callback is registered or unregistered.
1678 int musb_mailbox(enum musb_vbus_id_status status)
1680 if (musb_phy_callback)
1681 return musb_phy_callback(status);
1683 return -ENODEV;
1685 EXPORT_SYMBOL_GPL(musb_mailbox);
1687 /*-------------------------------------------------------------------------*/
1689 static ssize_t
1690 mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1692 struct musb *musb = dev_to_musb(dev);
1693 unsigned long flags;
1694 int ret = -EINVAL;
1696 spin_lock_irqsave(&musb->lock, flags);
1697 ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
1698 spin_unlock_irqrestore(&musb->lock, flags);
1700 return ret;
1703 static ssize_t
1704 mode_store(struct device *dev, struct device_attribute *attr,
1705 const char *buf, size_t n)
1707 struct musb *musb = dev_to_musb(dev);
1708 unsigned long flags;
1709 int status;
1711 spin_lock_irqsave(&musb->lock, flags);
1712 if (sysfs_streq(buf, "host"))
1713 status = musb_platform_set_mode(musb, MUSB_HOST);
1714 else if (sysfs_streq(buf, "peripheral"))
1715 status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
1716 else if (sysfs_streq(buf, "otg"))
1717 status = musb_platform_set_mode(musb, MUSB_OTG);
1718 else
1719 status = -EINVAL;
1720 spin_unlock_irqrestore(&musb->lock, flags);
1722 return (status == 0) ? n : status;
1724 static DEVICE_ATTR_RW(mode);
1726 static ssize_t
1727 vbus_store(struct device *dev, struct device_attribute *attr,
1728 const char *buf, size_t n)
1730 struct musb *musb = dev_to_musb(dev);
1731 unsigned long flags;
1732 unsigned long val;
1734 if (sscanf(buf, "%lu", &val) < 1) {
1735 dev_err(dev, "Invalid VBUS timeout ms value\n");
1736 return -EINVAL;
1739 spin_lock_irqsave(&musb->lock, flags);
1740 /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
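	/* Editorial example: writing "10" to the vbus attribute yields
	 * a_wait_bcon = max(10, OTG_TIME_A_WAIT_BCON), while "0" selects the
	 * zero/unlimited case mentioned above.
	 */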
1741 musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0;
1742 if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
1743 musb->is_active = 0;
1744 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1745 spin_unlock_irqrestore(&musb->lock, flags);
1747 return n;
1750 static ssize_t
1751 vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1753 struct musb *musb = dev_to_musb(dev);
1754 unsigned long flags;
1755 unsigned long val;
1756 int vbus;
1757 u8 devctl;
1759 pm_runtime_get_sync(dev);
1760 spin_lock_irqsave(&musb->lock, flags);
1761 val = musb->a_wait_bcon;
1762 vbus = musb_platform_get_vbus_status(musb);
1763 if (vbus < 0) {
1764 /* Use default MUSB method by means of DEVCTL register */
1765 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1766 if ((devctl & MUSB_DEVCTL_VBUS)
1767 == (3 << MUSB_DEVCTL_VBUS_SHIFT))
1768 vbus = 1;
1769 else
1770 vbus = 0;
1772 spin_unlock_irqrestore(&musb->lock, flags);
1773 pm_runtime_put_sync(dev);
1775 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1776 vbus ? "on" : "off", val);
1778 static DEVICE_ATTR_RW(vbus);
1780 /* Gadget drivers can't know that a host is connected so they might want
1781 * to start SRP, but users can. This allows userspace to trigger SRP.
1783 static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
1784 const char *buf, size_t n)
1786 struct musb *musb = dev_to_musb(dev);
1787 unsigned short srp;
1789 if (sscanf(buf, "%hu", &srp) != 1
1790 || (srp != 1)) {
1791 dev_err(dev, "SRP: Value must be 1\n");
1792 return -EINVAL;
1795 if (srp == 1)
1796 musb_g_wakeup(musb);
1798 return n;
1800 static DEVICE_ATTR_WO(srp);
1802 static struct attribute *musb_attributes[] = {
1803 &dev_attr_mode.attr,
1804 &dev_attr_vbus.attr,
1805 &dev_attr_srp.attr,
1806 NULL
1809 static const struct attribute_group musb_attr_group = {
1810 .attrs = musb_attributes,
1813 #define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
1814 (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
1815 MUSB_DEVCTL_SESSION)
1816 #define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
1817 MUSB_DEVCTL_SESSION)
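/* Editorial note: assuming the usual musb_regs.h bit values, the two quirk
 * patterns above spell out the raw DEVCTL bytes their names encode:
 * B_INVALID_VBUS_91 is 0x91 (BDEVICE | VBUS level 2 | SESSION) and
 * A_DISCONNECT_19 is 0x19 (VBUS level 3 | SESSION), matched after the
 * FSDEV/LSDEV/HR bits are masked off in musb_pm_runtime_check_session().
 */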
1820 * Check the musb devctl session bit to determine if we want to
1821 * allow PM runtime for the device. In general, we want to keep things
1822 * active when the session bit is set except after host disconnect.
1824 * Only called from musb_irq_work. If this ever needs to get called
1825 * elsewhere, proper locking must be implemented for musb->session.
1827 static void musb_pm_runtime_check_session(struct musb *musb)
1829 u8 devctl, s;
1830 int error;
1832 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1834 /* Handle session status quirks first */
1835 s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
1836 MUSB_DEVCTL_HR;
1837 switch (devctl & ~s) {
1838 case MUSB_QUIRK_B_INVALID_VBUS_91:
1839 if (musb->quirk_retries && !musb->flush_irq_work) {
1840 musb_dbg(musb,
1841 "Poll devctl on invalid vbus, assume no session");
1842 schedule_delayed_work(&musb->irq_work,
1843 msecs_to_jiffies(1000));
1844 musb->quirk_retries--;
1845 return;
1847 /* fall through */
1848 case MUSB_QUIRK_A_DISCONNECT_19:
1849 if (musb->quirk_retries && !musb->flush_irq_work) {
1850 musb_dbg(musb,
1851 "Poll devctl on possible host mode disconnect");
1852 schedule_delayed_work(&musb->irq_work,
1853 msecs_to_jiffies(1000));
1854 musb->quirk_retries--;
1855 return;
1857 if (!musb->session)
1858 break;
1859 musb_dbg(musb, "Allow PM on possible host mode disconnect");
1860 pm_runtime_mark_last_busy(musb->controller);
1861 pm_runtime_put_autosuspend(musb->controller);
1862 musb->session = false;
1863 return;
1864 default:
1865 break;
1868 /* No need to do anything if session has not changed */
1869 s = devctl & MUSB_DEVCTL_SESSION;
1870 if (s == musb->session)
1871 return;
1873 /* Block PM or allow PM? */
1874 if (s) {
1875 musb_dbg(musb, "Block PM on active session: %02x", devctl);
1876 error = pm_runtime_get_sync(musb->controller);
1877 if (error < 0)
1878 dev_err(musb->controller, "Could not enable: %i\n",
1879 error);
1880 musb->quirk_retries = 3;
1881 } else {
1882 musb_dbg(musb, "Allow PM with no session: %02x", devctl);
1883 pm_runtime_mark_last_busy(musb->controller);
1884 pm_runtime_put_autosuspend(musb->controller);
1887 musb->session = s;
1890 /* Only used to provide driver mode change events */
1891 static void musb_irq_work(struct work_struct *data)
1893 struct musb *musb = container_of(data, struct musb, irq_work.work);
1894 int error;
1896 error = pm_runtime_get_sync(musb->controller);
1897 if (error < 0) {
1898 dev_err(musb->controller, "Could not enable: %i\n", error);
1900 return;
1903 musb_pm_runtime_check_session(musb);
1905 if (musb->xceiv->otg->state != musb->xceiv_old_state) {
1906 musb->xceiv_old_state = musb->xceiv->otg->state;
1907 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1910 pm_runtime_mark_last_busy(musb->controller);
1911 pm_runtime_put_autosuspend(musb->controller);
1914 static void musb_recover_from_babble(struct musb *musb)
1916 int ret;
1917 u8 devctl;
1919 musb_disable_interrupts(musb);
1922 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
1923 * it some slack and wait for 10us.
1925 udelay(10);
1927 ret = musb_platform_recover(musb);
1928 if (ret) {
1929 musb_enable_interrupts(musb);
1930 return;
1933 /* drop session bit */
1934 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1935 devctl &= ~MUSB_DEVCTL_SESSION;
1936 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
1938 /* tell usbcore about it */
1939 musb_root_disconnect(musb);
1941 /*
1942 * When a babble condition occurs, the musb controller
1943 * removes the session bit and the endpoint config is lost.
1944 */
1945 if (musb->dyn_fifo)
1946 ret = ep_config_from_table(musb);
1947 else
1948 ret = ep_config_from_hw(musb);
1950 /* restart session */
1951 if (ret == 0)
1952 musb_start(musb);
1953 }
1955 /* --------------------------------------------------------------------------
1956 * Init support
1957 */
1959 static struct musb *allocate_instance(struct device *dev,
1960 const struct musb_hdrc_config *config, void __iomem *mbase)
1961 {
1962 struct musb *musb;
1963 struct musb_hw_ep *ep;
1964 int epnum;
1965 int ret;
1967 musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
1968 if (!musb)
1969 return NULL;
1971 INIT_LIST_HEAD(&musb->control);
1972 INIT_LIST_HEAD(&musb->in_bulk);
1973 INIT_LIST_HEAD(&musb->out_bulk);
1974 INIT_LIST_HEAD(&musb->pending_list);
1976 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1977 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
1978 musb->mregs = mbase;
1979 musb->ctrl_base = mbase;
1980 musb->nIrq = -ENODEV;
1981 musb->config = config;
1982 BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
1983 for (epnum = 0, ep = musb->endpoints;
1984 epnum < musb->config->num_eps;
1985 epnum++, ep++) {
1986 ep->musb = musb;
1987 ep->epnum = epnum;
1988 }
1990 musb->controller = dev;
1992 ret = musb_host_alloc(musb);
1993 if (ret < 0)
1994 goto err_free;
1996 dev_set_drvdata(dev, musb);
1998 return musb;
2000 err_free:
2001 return NULL;
2002 }
2004 static void musb_free(struct musb *musb)
2005 {
2006 /* this has multiple entry modes. it handles fault cleanup after
2007 * probe(), where things may be partially set up, as well as rmmod
2008 * cleanup after everything's been de-activated.
2009 */
2011 #ifdef CONFIG_SYSFS
2012 sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
2013 #endif
2015 if (musb->nIrq >= 0) {
2016 if (musb->irq_wake)
2017 disable_irq_wake(musb->nIrq);
2018 free_irq(musb->nIrq, musb);
2019 }
2021 musb_host_free(musb);
2022 }
2024 struct musb_pending_work {
2025 int (*callback)(struct musb *musb, void *data);
2026 void *data;
2027 struct list_head node;
2028 };
2030 #ifdef CONFIG_PM
2031 /*
2032 * Called from musb_runtime_resume(), musb_resume(), and
2033 * musb_queue_resume_work(). Callers must take musb->lock.
2034 */
2035 static int musb_run_resume_work(struct musb *musb)
2036 {
2037 struct musb_pending_work *w, *_w;
2038 unsigned long flags;
2039 int error = 0;
2041 spin_lock_irqsave(&musb->list_lock, flags);
2042 list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
2043 if (w->callback) {
2044 error = w->callback(musb, w->data);
2045 if (error < 0) {
2046 dev_err(musb->controller,
2047 "resume callback %p failed: %i\n",
2048 w->callback, error);
2049 }
2050 }
2051 list_del(&w->node);
2052 devm_kfree(musb->controller, w);
2053 }
2054 spin_unlock_irqrestore(&musb->list_lock, flags);
2056 return error;
2057 }
2058 #endif
2060 /*
2061 * Called to run work if device is active or else queue the work to happen
2062 * on resume. Caller must take musb->lock and must hold an RPM reference.
2064 * Note that we cowardly refuse queuing work after musb PM runtime
2065 * resume is done calling musb_run_resume_work() and return -EINPROGRESS
2066 * instead.
2067 */
2068 int musb_queue_resume_work(struct musb *musb,
2069 int (*callback)(struct musb *musb, void *data),
2070 void *data)
2071 {
2072 struct musb_pending_work *w;
2073 unsigned long flags;
2074 int error;
2076 if (WARN_ON(!callback))
2077 return -EINVAL;
2079 if (pm_runtime_active(musb->controller))
2080 return callback(musb, data);
2082 w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
2083 if (!w)
2084 return -ENOMEM;
2086 w->callback = callback;
2087 w->data = data;
2088 spin_lock_irqsave(&musb->list_lock, flags);
2089 if (musb->is_runtime_suspended) {
2090 list_add_tail(&w->node, &musb->pending_list);
2091 error = 0;
2092 } else {
2093 dev_err(musb->controller, "could not add resume work %p\n",
2094 callback);
2095 devm_kfree(musb->controller, w);
2096 error = -EINPROGRESS;
2097 }
2098 spin_unlock_irqrestore(&musb->list_lock, flags);
2100 return error;
2101 }
2102 EXPORT_SYMBOL_GPL(musb_queue_resume_work);
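/*
 * Illustrative use from a glue driver (hypothetical names, not code from
 * this file): defer a register write until the core has resumed.
 *
 *	static int my_glue_devctl_cb(struct musb *musb, void *data)
 *	{
 *		musb_writeb(musb->mregs, MUSB_DEVCTL, *(u8 *)data);
 *		return 0;
 *	}
 *
 *	// caller holds musb->lock and an RPM reference
 *	error = musb_queue_resume_work(musb, my_glue_devctl_cb, &devctl);
 *
 * If the controller is runtime-active the callback runs immediately; if
 * it is runtime-suspended the callback is queued and run from
 * musb_run_resume_work() on resume; -EINPROGRESS means resume has already
 * drained the pending list.
 */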
2104 static void musb_deassert_reset(struct work_struct *work)
2105 {
2106 struct musb *musb;
2107 unsigned long flags;
2109 musb = container_of(work, struct musb, deassert_reset_work.work);
2111 spin_lock_irqsave(&musb->lock, flags);
2113 if (musb->port1_status & USB_PORT_STAT_RESET)
2114 musb_port_reset(musb, false);
2116 spin_unlock_irqrestore(&musb->lock, flags);
2117 }
2119 /*
2120 * Perform generic per-controller initialization.
2121 *
2122 * @dev: the controller (already clocked, etc)
2123 * @nIrq: IRQ number
2124 * @ctrl: virtual address of controller registers,
2125 * not yet corrected for platform-specific offsets
2126 */
2127 static int
2128 musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2129 {
2130 int status;
2131 struct musb *musb;
2132 struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
2134 /* The driver might handle more features than the board; OK.
2135 * Fail when the board needs a feature that's not enabled.
2136 */
2137 if (!plat) {
2138 dev_err(dev, "no platform_data?\n");
2139 status = -ENODEV;
2140 goto fail0;
2141 }
2143 /* allocate */
2144 musb = allocate_instance(dev, plat->config, ctrl);
2145 if (!musb) {
2146 status = -ENOMEM;
2147 goto fail0;
2148 }
2150 spin_lock_init(&musb->lock);
2151 spin_lock_init(&musb->list_lock);
2152 musb->board_set_power = plat->set_power;
2153 musb->min_power = plat->min_power;
2154 musb->ops = plat->platform_ops;
2155 musb->port_mode = plat->mode;
2157 /*
2158 * Initialize the default IO functions. At least omap2430 needs
2159 * these early. We initialize the platform specific IO functions
2160 * later on.
2161 */
2162 musb_readb = musb_default_readb;
2163 musb_writeb = musb_default_writeb;
2164 musb_readw = musb_default_readw;
2165 musb_writew = musb_default_writew;
2166 musb_readl = musb_default_readl;
2167 musb_writel = musb_default_writel;
2169 /* The musb_platform_init() call:
2170 * - adjusts musb->mregs
2171 * - sets the musb->isr
2172 * - may initialize an integrated transceiver
2173 * - initializes musb->xceiv, usually by otg_get_phy()
2174 * - stops powering VBUS
2176 * There are various transceiver configurations. Blackfin,
2177 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
2178 * external/discrete ones in various flavors (twl4030 family,
2179 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
2180 */
2181 status = musb_platform_init(musb);
2182 if (status < 0)
2183 goto fail1;
2185 if (!musb->isr) {
2186 status = -ENODEV;
2187 goto fail2;
2188 }
2190 if (musb->ops->quirks)
2191 musb->io.quirks = musb->ops->quirks;
2193 /* Most devices use indexed offset or flat offset */
2194 if (musb->io.quirks & MUSB_INDEXED_EP) {
2195 musb->io.ep_offset = musb_indexed_ep_offset;
2196 musb->io.ep_select = musb_indexed_ep_select;
2197 } else {
2198 musb->io.ep_offset = musb_flat_ep_offset;
2199 musb->io.ep_select = musb_flat_ep_select;
2200 }
2202 if (musb->io.quirks & MUSB_G_NO_SKB_RESERVE)
2203 musb->g.quirk_avoids_skb_reserve = 1;
2205 /* At least tusb6010 has its own offsets */
2206 if (musb->ops->ep_offset)
2207 musb->io.ep_offset = musb->ops->ep_offset;
2208 if (musb->ops->ep_select)
2209 musb->io.ep_select = musb->ops->ep_select;
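/*
 * "Indexed" addressing selects an endpoint by writing its number to the
 * MUSB_INDEX register and then using one shared bank of CSR registers;
 * "flat" addressing gives each endpoint its own fixed register window.
 * Glue layers with unusual register layouts (tusb6010, for example)
 * override ep_offset/ep_select entirely, as above.
 */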
2211 if (musb->ops->fifo_mode)
2212 fifo_mode = musb->ops->fifo_mode;
2213 else
2214 fifo_mode = 4;
2216 if (musb->ops->fifo_offset)
2217 musb->io.fifo_offset = musb->ops->fifo_offset;
2218 else
2219 musb->io.fifo_offset = musb_default_fifo_offset;
2221 if (musb->ops->busctl_offset)
2222 musb->io.busctl_offset = musb->ops->busctl_offset;
2223 else
2224 musb->io.busctl_offset = musb_default_busctl_offset;
2226 if (musb->ops->readb)
2227 musb_readb = musb->ops->readb;
2228 if (musb->ops->writeb)
2229 musb_writeb = musb->ops->writeb;
2230 if (musb->ops->readw)
2231 musb_readw = musb->ops->readw;
2232 if (musb->ops->writew)
2233 musb_writew = musb->ops->writew;
2234 if (musb->ops->readl)
2235 musb_readl = musb->ops->readl;
2236 if (musb->ops->writel)
2237 musb_writel = musb->ops->writel;
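/*
 * Hypothetical sketch of the glue-layer ops consumed above; the real
 * tables live in the per-SoC glue drivers:
 *
 *	static const struct musb_platform_ops my_glue_ops = {
 *		.quirks		= MUSB_INDEXED_EP,
 *		.init		= my_glue_init,
 *		.exit		= my_glue_exit,
 *		.readb		= my_glue_readb,
 *		.writeb		= my_glue_writeb,
 *		.fifo_offset	= my_glue_fifo_offset,
 *		.dma_init	= my_glue_dma_controller_create,
 *		.dma_exit	= my_glue_dma_controller_destroy,
 *	};
 */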
2239 #ifndef CONFIG_MUSB_PIO_ONLY
2240 if (!musb->ops->dma_init || !musb->ops->dma_exit) {
2241 dev_err(dev, "DMA controller not set\n");
2242 status = -ENODEV;
2243 goto fail2;
2244 }
2245 musb_dma_controller_create = musb->ops->dma_init;
2246 musb_dma_controller_destroy = musb->ops->dma_exit;
2247 #endif
2249 if (musb->ops->read_fifo)
2250 musb->io.read_fifo = musb->ops->read_fifo;
2251 else
2252 musb->io.read_fifo = musb_default_read_fifo;
2254 if (musb->ops->write_fifo)
2255 musb->io.write_fifo = musb->ops->write_fifo;
2256 else
2257 musb->io.write_fifo = musb_default_write_fifo;
2259 if (!musb->xceiv->io_ops) {
2260 musb->xceiv->io_dev = musb->controller;
2261 musb->xceiv->io_priv = musb->mregs;
2262 musb->xceiv->io_ops = &musb_ulpi_access;
2263 }
2265 if (musb->ops->phy_callback)
2266 musb_phy_callback = musb->ops->phy_callback;
2268 /*
2269 * We need musb_read/write functions initialized for PM.
2270 * Note that at least 2430 glue needs autosuspend delay
2271 * somewhere above 300 ms for the hardware to idle properly
2272 * after disconnecting the cable in host mode. Let's use
2273 * 500 ms for some margin.
2274 */
2275 pm_runtime_use_autosuspend(musb->controller);
2276 pm_runtime_set_autosuspend_delay(musb->controller, 500);
2277 pm_runtime_enable(musb->controller);
2278 pm_runtime_get_sync(musb->controller);
2280 status = usb_phy_init(musb->xceiv);
2281 if (status < 0)
2282 goto err_usb_phy_init;
2284 if (use_dma && dev->dma_mask) {
2285 musb->dma_controller =
2286 musb_dma_controller_create(musb, musb->mregs);
2287 if (IS_ERR(musb->dma_controller)) {
2288 status = PTR_ERR(musb->dma_controller);
2289 goto fail2_5;
2290 }
2291 }
2293 /* be sure interrupts are disabled before connecting ISR */
2294 musb_platform_disable(musb);
2295 musb_disable_interrupts(musb);
2296 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2298 /* Init IRQ workqueue before request_irq */
2299 INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
2300 INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
2301 INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
2303 /* setup musb parts of the core (especially endpoints) */
2304 status = musb_core_init(plat->config->multipoint
2305 ? MUSB_CONTROLLER_MHDRC
2306 : MUSB_CONTROLLER_HDRC, musb);
2307 if (status < 0)
2308 goto fail3;
2310 timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
2312 /* attach to the IRQ */
2313 if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
2314 dev_err(dev, "request_irq %d failed!\n", nIrq);
2315 status = -ENODEV;
2316 goto fail3;
2317 }
2318 musb->nIrq = nIrq;
2319 /* FIXME this handles wakeup irqs wrong */
2320 if (enable_irq_wake(nIrq) == 0) {
2321 musb->irq_wake = 1;
2322 device_init_wakeup(dev, 1);
2323 } else {
2324 musb->irq_wake = 0;
2325 }
2327 /* program PHY to use external vBus if required */
2328 if (plat->extvbus) {
2329 u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
2330 busctl |= MUSB_ULPI_USE_EXTVBUS;
2331 musb_write_ulpi_buscontrol(musb->mregs, busctl);
2332 }
2334 if (musb->xceiv->otg->default_a) {
2335 MUSB_HST_MODE(musb);
2336 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2337 } else {
2338 MUSB_DEV_MODE(musb);
2339 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
2340 }
2342 switch (musb->port_mode) {
2343 case MUSB_PORT_MODE_HOST:
2344 status = musb_host_setup(musb, plat->power);
2345 if (status < 0)
2346 goto fail3;
2347 status = musb_platform_set_mode(musb, MUSB_HOST);
2348 break;
2349 case MUSB_PORT_MODE_GADGET:
2350 status = musb_gadget_setup(musb);
2351 if (status < 0)
2352 goto fail3;
2353 status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
2354 break;
2355 case MUSB_PORT_MODE_DUAL_ROLE:
2356 status = musb_host_setup(musb, plat->power);
2357 if (status < 0)
2358 goto fail3;
2359 status = musb_gadget_setup(musb);
2360 if (status) {
2361 musb_host_cleanup(musb);
2362 goto fail3;
2363 }
2364 status = musb_platform_set_mode(musb, MUSB_OTG);
2365 break;
2366 default:
2367 dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
2368 break;
2369 }
2371 if (status < 0)
2372 goto fail3;
2374 status = musb_init_debugfs(musb);
2375 if (status < 0)
2376 goto fail4;
2378 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2379 if (status)
2380 goto fail5;
2382 musb->is_initialized = 1;
2383 pm_runtime_mark_last_busy(musb->controller);
2384 pm_runtime_put_autosuspend(musb->controller);
2386 return 0;
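/*
 * Error unwind: the fail* labels below undo the steps above in reverse
 * order, so failing at any point only releases what was already set up.
 */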
2388 fail5:
2389 musb_exit_debugfs(musb);
2391 fail4:
2392 musb_gadget_cleanup(musb);
2393 musb_host_cleanup(musb);
2395 fail3:
2396 cancel_delayed_work_sync(&musb->irq_work);
2397 cancel_delayed_work_sync(&musb->finish_resume_work);
2398 cancel_delayed_work_sync(&musb->deassert_reset_work);
2399 if (musb->dma_controller)
2400 musb_dma_controller_destroy(musb->dma_controller);
2402 fail2_5:
2403 usb_phy_shutdown(musb->xceiv);
2405 err_usb_phy_init:
2406 pm_runtime_dont_use_autosuspend(musb->controller);
2407 pm_runtime_put_sync(musb->controller);
2408 pm_runtime_disable(musb->controller);
2410 fail2:
2411 if (musb->irq_wake)
2412 device_init_wakeup(dev, 0);
2413 musb_platform_exit(musb);
2415 fail1:
2416 if (status != -EPROBE_DEFER)
2417 dev_err(musb->controller,
2418 "%s failed with status %d\n", __func__, status);
2420 musb_free(musb);
2422 fail0:
2424 return status;
2425 }
2428 /*-------------------------------------------------------------------------*/
2430 /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2431 * bridge to a platform device; this driver then suffices.
2432 */
2433 static int musb_probe(struct platform_device *pdev)
2434 {
2435 struct device *dev = &pdev->dev;
2436 int irq = platform_get_irq_byname(pdev, "mc");
2437 struct resource *iomem;
2438 void __iomem *base;
2440 if (irq <= 0)
2441 return -ENODEV;
2443 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2444 base = devm_ioremap_resource(dev, iomem);
2445 if (IS_ERR(base))
2446 return PTR_ERR(base);
2448 return musb_init_controller(dev, irq, base);
2449 }
2451 static int musb_remove(struct platform_device *pdev)
2452 {
2453 struct device *dev = &pdev->dev;
2454 struct musb *musb = dev_to_musb(dev);
2455 unsigned long flags;
2457 /* this gets called on rmmod.
2458 * - Host mode: host may still be active
2459 * - Peripheral mode: peripheral is deactivated (or never-activated)
2460 * - OTG mode: both roles are deactivated (or never-activated)
2461 */
2462 musb_exit_debugfs(musb);
2464 cancel_delayed_work_sync(&musb->irq_work);
2465 cancel_delayed_work_sync(&musb->finish_resume_work);
2466 cancel_delayed_work_sync(&musb->deassert_reset_work);
2467 pm_runtime_get_sync(musb->controller);
2468 musb_host_cleanup(musb);
2469 musb_gadget_cleanup(musb);
2471 musb_platform_disable(musb);
2472 spin_lock_irqsave(&musb->lock, flags);
2473 musb_disable_interrupts(musb);
2474 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2475 spin_unlock_irqrestore(&musb->lock, flags);
2476 musb_platform_exit(musb);
2478 pm_runtime_dont_use_autosuspend(musb->controller);
2479 pm_runtime_put_sync(musb->controller);
2480 pm_runtime_disable(musb->controller);
2481 musb_phy_callback = NULL;
2482 if (musb->dma_controller)
2483 musb_dma_controller_destroy(musb->dma_controller);
2484 usb_phy_shutdown(musb->xceiv);
2485 musb_free(musb);
2486 device_init_wakeup(dev, 0);
2487 return 0;
2488 }
2490 #ifdef CONFIG_PM
2492 static void musb_save_context(struct musb *musb)
2493 {
2494 int i;
2495 void __iomem *musb_base = musb->mregs;
2496 void __iomem *epio;
2498 musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
2499 musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2500 musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
2501 musb->context.power = musb_readb(musb_base, MUSB_POWER);
2502 musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2503 musb->context.index = musb_readb(musb_base, MUSB_INDEX);
2504 musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2506 for (i = 0; i < musb->config->num_eps; ++i) {
2507 struct musb_hw_ep *hw_ep;
2509 hw_ep = &musb->endpoints[i];
2510 if (!hw_ep)
2511 continue;
2513 epio = hw_ep->regs;
2514 if (!epio)
2515 continue;
2517 musb_writeb(musb_base, MUSB_INDEX, i);
2518 musb->context.index_regs[i].txmaxp =
2519 musb_readw(epio, MUSB_TXMAXP);
2520 musb->context.index_regs[i].txcsr =
2521 musb_readw(epio, MUSB_TXCSR);
2522 musb->context.index_regs[i].rxmaxp =
2523 musb_readw(epio, MUSB_RXMAXP);
2524 musb->context.index_regs[i].rxcsr =
2525 musb_readw(epio, MUSB_RXCSR);
2527 if (musb->dyn_fifo) {
2528 musb->context.index_regs[i].txfifoadd =
2529 musb_read_txfifoadd(musb_base);
2530 musb->context.index_regs[i].rxfifoadd =
2531 musb_read_rxfifoadd(musb_base);
2532 musb->context.index_regs[i].txfifosz =
2533 musb_read_txfifosz(musb_base);
2534 musb->context.index_regs[i].rxfifosz =
2535 musb_read_rxfifosz(musb_base);
2536 }
2538 musb->context.index_regs[i].txtype =
2539 musb_readb(epio, MUSB_TXTYPE);
2540 musb->context.index_regs[i].txinterval =
2541 musb_readb(epio, MUSB_TXINTERVAL);
2542 musb->context.index_regs[i].rxtype =
2543 musb_readb(epio, MUSB_RXTYPE);
2544 musb->context.index_regs[i].rxinterval =
2545 musb_readb(epio, MUSB_RXINTERVAL);
2547 musb->context.index_regs[i].txfunaddr =
2548 musb_read_txfunaddr(musb, i);
2549 musb->context.index_regs[i].txhubaddr =
2550 musb_read_txhubaddr(musb, i);
2551 musb->context.index_regs[i].txhubport =
2552 musb_read_txhubport(musb, i);
2554 musb->context.index_regs[i].rxfunaddr =
2555 musb_read_rxfunaddr(musb, i);
2556 musb->context.index_regs[i].rxhubaddr =
2557 musb_read_rxhubaddr(musb, i);
2558 musb->context.index_regs[i].rxhubport =
2559 musb_read_rxhubport(musb, i);
2560 }
2561 }
2563 static void musb_restore_context(struct musb *musb)
2564 {
2565 int i;
2566 void __iomem *musb_base = musb->mregs;
2567 void __iomem *epio;
2568 u8 power;
2570 musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
2571 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
2572 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
2574 /* Don't affect SUSPENDM/RESUME bits in POWER reg */
2575 power = musb_readb(musb_base, MUSB_POWER);
2576 power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
2577 musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
2578 power |= musb->context.power;
2579 musb_writeb(musb_base, MUSB_POWER, power);
2581 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
2582 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
2583 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
2584 if (musb->context.devctl & MUSB_DEVCTL_SESSION)
2585 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2587 for (i = 0; i < musb->config->num_eps; ++i) {
2588 struct musb_hw_ep *hw_ep;
2590 hw_ep = &musb->endpoints[i];
2591 if (!hw_ep)
2592 continue;
2594 epio = hw_ep->regs;
2595 if (!epio)
2596 continue;
2598 musb_writeb(musb_base, MUSB_INDEX, i);
2599 musb_writew(epio, MUSB_TXMAXP,
2600 musb->context.index_regs[i].txmaxp);
2601 musb_writew(epio, MUSB_TXCSR,
2602 musb->context.index_regs[i].txcsr);
2603 musb_writew(epio, MUSB_RXMAXP,
2604 musb->context.index_regs[i].rxmaxp);
2605 musb_writew(epio, MUSB_RXCSR,
2606 musb->context.index_regs[i].rxcsr);
2608 if (musb->dyn_fifo) {
2609 musb_write_txfifosz(musb_base,
2610 musb->context.index_regs[i].txfifosz);
2611 musb_write_rxfifosz(musb_base,
2612 musb->context.index_regs[i].rxfifosz);
2613 musb_write_txfifoadd(musb_base,
2614 musb->context.index_regs[i].txfifoadd);
2615 musb_write_rxfifoadd(musb_base,
2616 musb->context.index_regs[i].rxfifoadd);
2617 }
2619 musb_writeb(epio, MUSB_TXTYPE,
2620 musb->context.index_regs[i].txtype);
2621 musb_writeb(epio, MUSB_TXINTERVAL,
2622 musb->context.index_regs[i].txinterval);
2623 musb_writeb(epio, MUSB_RXTYPE,
2624 musb->context.index_regs[i].rxtype);
2625 musb_writeb(epio, MUSB_RXINTERVAL,
2627 musb->context.index_regs[i].rxinterval);
2628 musb_write_txfunaddr(musb, i,
2629 musb->context.index_regs[i].txfunaddr);
2630 musb_write_txhubaddr(musb, i,
2631 musb->context.index_regs[i].txhubaddr);
2632 musb_write_txhubport(musb, i,
2633 musb->context.index_regs[i].txhubport);
2635 musb_write_rxfunaddr(musb, i,
2636 musb->context.index_regs[i].rxfunaddr);
2637 musb_write_rxhubaddr(musb, i,
2638 musb->context.index_regs[i].rxhubaddr);
2639 musb_write_rxhubport(musb, i,
2640 musb->context.index_regs[i].rxhubport);
2641 }
2642 musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
2643 }
2645 static int musb_suspend(struct device *dev)
2646 {
2647 struct musb *musb = dev_to_musb(dev);
2648 unsigned long flags;
2649 int ret;
2651 ret = pm_runtime_get_sync(dev);
2652 if (ret < 0) {
2653 pm_runtime_put_noidle(dev);
2654 return ret;
2655 }
2657 musb_platform_disable(musb);
2658 musb_disable_interrupts(musb);
2660 musb->flush_irq_work = true;
2661 while (flush_delayed_work(&musb->irq_work))
2662 ;
2663 musb->flush_irq_work = false;
2665 if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
2666 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2668 WARN_ON(!list_empty(&musb->pending_list));
2670 spin_lock_irqsave(&musb->lock, flags);
2672 if (is_peripheral_active(musb)) {
2673 /* FIXME force disconnect unless we know USB will wake
2674 * the system up quickly enough to respond ...
2675 */
2676 } else if (is_host_active(musb)) {
2677 /* we know all the children are suspended; sometimes
2678 * they will even be wakeup-enabled.
2679 */
2680 }
2682 musb_save_context(musb);
2684 spin_unlock_irqrestore(&musb->lock, flags);
2685 return 0;
2686 }
2688 static int musb_resume(struct device *dev)
2689 {
2690 struct musb *musb = dev_to_musb(dev);
2691 unsigned long flags;
2692 int error;
2693 u8 devctl;
2694 u8 mask;
2696 /*
2697 * For static cmos like DaVinci, register values were preserved
2698 * unless for some reason the whole soc powered down or the USB
2699 * module got reset through the PSC (vs just being disabled).
2701 * For the DSPS glue layer though, a full register restore has to
2702 * be done. As it shouldn't harm other platforms, we do it
2703 * unconditionally.
2704 */
2706 musb_restore_context(musb);
2708 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2709 mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
2710 if ((devctl & mask) != (musb->context.devctl & mask))
2711 musb->port1_status = 0;
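/*
 * If the device-related DEVCTL bits changed across suspend, whatever was
 * attached before is gone or different, so drop the stale root-port
 * status instead of resuming it.
 */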
2713 musb_enable_interrupts(musb);
2714 musb_platform_enable(musb);
2716 spin_lock_irqsave(&musb->lock, flags);
2717 error = musb_run_resume_work(musb);
2718 if (error)
2719 dev_err(musb->controller, "resume work failed with %i\n",
2720 error);
2721 spin_unlock_irqrestore(&musb->lock, flags);
2723 pm_runtime_mark_last_busy(dev);
2724 pm_runtime_put_autosuspend(dev);
2726 return 0;
2727 }
2729 static int musb_runtime_suspend(struct device *dev)
2730 {
2731 struct musb *musb = dev_to_musb(dev);
2733 musb_save_context(musb);
2734 musb->is_runtime_suspended = 1;
2736 return 0;
2737 }
2739 static int musb_runtime_resume(struct device *dev)
2740 {
2741 struct musb *musb = dev_to_musb(dev);
2742 unsigned long flags;
2743 int error;
2745 /*
2746 * When pm_runtime_get_sync() is called for the first time during
2747 * driver init, some of the state used by the restore function has
2748 * not been initialized yet. The clock still needs to be enabled
2749 * before any register access, though, so pm_runtime_get_sync()
2750 * has to be called anyway. Also, a context restore without a
2751 * prior save makes no sense.
2752 */
2754 if (!musb->is_initialized)
2755 return 0;
2757 musb_restore_context(musb);
2759 spin_lock_irqsave(&musb->lock, flags);
2760 error = musb_run_resume_work(musb);
2761 if (error)
2762 dev_err(musb->controller, "resume work failed with %i\n",
2763 error);
2764 musb->is_runtime_suspended = 0;
2765 spin_unlock_irqrestore(&musb->lock, flags);
2767 return 0;
2768 }
2770 static const struct dev_pm_ops musb_dev_pm_ops = {
2771 .suspend = musb_suspend,
2772 .resume = musb_resume,
2773 .runtime_suspend = musb_runtime_suspend,
2774 .runtime_resume = musb_runtime_resume,
2775 };
2777 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2778 #else
2779 #define MUSB_DEV_PM_OPS NULL
2780 #endif
2782 static struct platform_driver musb_driver = {
2783 .driver = {
2784 .name = (char *)musb_driver_name,
2785 .bus = &platform_bus_type,
2786 .pm = MUSB_DEV_PM_OPS,
2787 },
2788 .probe = musb_probe,
2789 .remove = musb_remove,
2790 };
2792 module_platform_driver(musb_driver);