1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
3 * hcd.c - DesignWare HS OTG Controller host-mode routines
5 * Copyright (C) 2004-2013 Synopsys, Inc.
9 * This file contains the core HCD code, and implements the Linux hc_driver
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/delay.h>
20 #include <linux/slab.h>
21 #include <linux/usb.h>
23 #include <linux/usb/hcd.h>
24 #include <linux/usb/ch11.h>
25 #include <linux/usb/of.h>
31 * =========================================================================
32 * Host Core Layer Functions
33 * =========================================================================
37 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
38 * used in both device and host modes
40 * @hsotg: Programming view of the DWC_otg controller
42 static void dwc2_enable_common_interrupts(struct dwc2_hsotg
*hsotg
)
46 /* Clear any pending OTG Interrupts */
47 dwc2_writel(hsotg
, 0xffffffff, GOTGINT
);
49 /* Clear any pending interrupts */
50 dwc2_writel(hsotg
, 0xffffffff, GINTSTS
);
52 /* Enable the interrupts in the GINTMSK */
53 intmsk
= GINTSTS_MODEMIS
| GINTSTS_OTGINT
;
55 if (!hsotg
->params
.host_dma
)
56 intmsk
|= GINTSTS_RXFLVL
;
57 if (!hsotg
->params
.external_id_pin_ctl
)
58 intmsk
|= GINTSTS_CONIDSTSCHNG
;
60 intmsk
|= GINTSTS_WKUPINT
| GINTSTS_USBSUSP
|
63 if (dwc2_is_device_mode(hsotg
) && hsotg
->params
.lpm
)
64 intmsk
|= GINTSTS_LPMTRANRCVD
;
66 dwc2_writel(hsotg
, intmsk
, GINTMSK
);
69 static int dwc2_gahbcfg_init(struct dwc2_hsotg
*hsotg
)
71 u32 ahbcfg
= dwc2_readl(hsotg
, GAHBCFG
);
73 switch (hsotg
->hw_params
.arch
) {
74 case GHWCFG2_EXT_DMA_ARCH
:
75 dev_err(hsotg
->dev
, "External DMA Mode not supported\n");
78 case GHWCFG2_INT_DMA_ARCH
:
79 dev_dbg(hsotg
->dev
, "Internal DMA Mode\n");
80 if (hsotg
->params
.ahbcfg
!= -1) {
81 ahbcfg
&= GAHBCFG_CTRL_MASK
;
82 ahbcfg
|= hsotg
->params
.ahbcfg
&
87 case GHWCFG2_SLAVE_ONLY_ARCH
:
89 dev_dbg(hsotg
->dev
, "Slave Only Mode\n");
93 if (hsotg
->params
.host_dma
)
94 ahbcfg
|= GAHBCFG_DMA_EN
;
96 hsotg
->params
.dma_desc_enable
= false;
98 dwc2_writel(hsotg
, ahbcfg
, GAHBCFG
);
103 static void dwc2_gusbcfg_init(struct dwc2_hsotg
*hsotg
)
107 usbcfg
= dwc2_readl(hsotg
, GUSBCFG
);
108 usbcfg
&= ~(GUSBCFG_HNPCAP
| GUSBCFG_SRPCAP
);
110 switch (hsotg
->hw_params
.op_mode
) {
111 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE
:
112 if (hsotg
->params
.otg_caps
.hnp_support
&&
113 hsotg
->params
.otg_caps
.srp_support
)
114 usbcfg
|= GUSBCFG_HNPCAP
;
117 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE
:
118 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE
:
119 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST
:
120 if (hsotg
->params
.otg_caps
.srp_support
)
121 usbcfg
|= GUSBCFG_SRPCAP
;
124 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE
:
125 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE
:
126 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST
:
131 dwc2_writel(hsotg
, usbcfg
, GUSBCFG
);
134 static int dwc2_vbus_supply_init(struct dwc2_hsotg
*hsotg
)
136 if (hsotg
->vbus_supply
)
137 return regulator_enable(hsotg
->vbus_supply
);
142 static int dwc2_vbus_supply_exit(struct dwc2_hsotg
*hsotg
)
144 if (hsotg
->vbus_supply
)
145 return regulator_disable(hsotg
->vbus_supply
);
151 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
153 * @hsotg: Programming view of DWC_otg controller
155 static void dwc2_enable_host_interrupts(struct dwc2_hsotg
*hsotg
)
159 dev_dbg(hsotg
->dev
, "%s()\n", __func__
);
161 /* Disable all interrupts */
162 dwc2_writel(hsotg
, 0, GINTMSK
);
163 dwc2_writel(hsotg
, 0, HAINTMSK
);
165 /* Enable the common interrupts */
166 dwc2_enable_common_interrupts(hsotg
);
168 /* Enable host mode interrupts without disturbing common interrupts */
169 intmsk
= dwc2_readl(hsotg
, GINTMSK
);
170 intmsk
|= GINTSTS_DISCONNINT
| GINTSTS_PRTINT
| GINTSTS_HCHINT
;
171 dwc2_writel(hsotg
, intmsk
, GINTMSK
);
175 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
177 * @hsotg: Programming view of DWC_otg controller
179 static void dwc2_disable_host_interrupts(struct dwc2_hsotg
*hsotg
)
181 u32 intmsk
= dwc2_readl(hsotg
, GINTMSK
);
183 /* Disable host mode interrupts without disturbing common interrupts */
184 intmsk
&= ~(GINTSTS_SOF
| GINTSTS_PRTINT
| GINTSTS_HCHINT
|
185 GINTSTS_PTXFEMP
| GINTSTS_NPTXFEMP
| GINTSTS_DISCONNINT
);
186 dwc2_writel(hsotg
, intmsk
, GINTMSK
);
190 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
191 * For system that have a total fifo depth that is smaller than the default
194 * @hsotg: Programming view of DWC_otg controller
196 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg
*hsotg
)
198 struct dwc2_core_params
*params
= &hsotg
->params
;
199 struct dwc2_hw_params
*hw
= &hsotg
->hw_params
;
200 u32 rxfsiz
, nptxfsiz
, ptxfsiz
, total_fifo_size
;
202 total_fifo_size
= hw
->total_fifo_size
;
203 rxfsiz
= params
->host_rx_fifo_size
;
204 nptxfsiz
= params
->host_nperio_tx_fifo_size
;
205 ptxfsiz
= params
->host_perio_tx_fifo_size
;
208 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
209 * allocation with support for high bandwidth endpoints. Synopsys
210 * defines MPS(Max Packet size) for a periodic EP=1024, and for
211 * non-periodic as 512.
213 if (total_fifo_size
< (rxfsiz
+ nptxfsiz
+ ptxfsiz
)) {
215 * For Buffer DMA mode/Scatter Gather DMA mode
216 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
217 * with n = number of host channel.
218 * 2 * ((1024/4) + 2) = 516
220 rxfsiz
= 516 + hw
->host_channels
;
223 * min non-periodic tx fifo depth
224 * 2 * (largest non-periodic USB packet used / 4)
230 * min periodic tx fifo depth
231 * (largest packet size*MC)/4
236 params
->host_rx_fifo_size
= rxfsiz
;
237 params
->host_nperio_tx_fifo_size
= nptxfsiz
;
238 params
->host_perio_tx_fifo_size
= ptxfsiz
;
242 * If the summation of RX, NPTX and PTX fifo sizes is still
243 * bigger than the total_fifo_size, then we have a problem.
245 * We won't be able to allocate as many endpoints. Right now,
246 * we're just printing an error message, but ideally this FIFO
247 * allocation algorithm would be improved in the future.
249 * FIXME improve this FIFO allocation algorithm.
251 if (unlikely(total_fifo_size
< (rxfsiz
+ nptxfsiz
+ ptxfsiz
)))
252 dev_err(hsotg
->dev
, "invalid fifo sizes\n");
255 static void dwc2_config_fifos(struct dwc2_hsotg
*hsotg
)
257 struct dwc2_core_params
*params
= &hsotg
->params
;
258 u32 nptxfsiz
, hptxfsiz
, dfifocfg
, grxfsiz
;
260 if (!params
->enable_dynamic_fifo
)
263 dwc2_calculate_dynamic_fifo(hsotg
);
266 grxfsiz
= dwc2_readl(hsotg
, GRXFSIZ
);
267 dev_dbg(hsotg
->dev
, "initial grxfsiz=%08x\n", grxfsiz
);
268 grxfsiz
&= ~GRXFSIZ_DEPTH_MASK
;
269 grxfsiz
|= params
->host_rx_fifo_size
<<
270 GRXFSIZ_DEPTH_SHIFT
& GRXFSIZ_DEPTH_MASK
;
271 dwc2_writel(hsotg
, grxfsiz
, GRXFSIZ
);
272 dev_dbg(hsotg
->dev
, "new grxfsiz=%08x\n",
273 dwc2_readl(hsotg
, GRXFSIZ
));
275 /* Non-periodic Tx FIFO */
276 dev_dbg(hsotg
->dev
, "initial gnptxfsiz=%08x\n",
277 dwc2_readl(hsotg
, GNPTXFSIZ
));
278 nptxfsiz
= params
->host_nperio_tx_fifo_size
<<
279 FIFOSIZE_DEPTH_SHIFT
& FIFOSIZE_DEPTH_MASK
;
280 nptxfsiz
|= params
->host_rx_fifo_size
<<
281 FIFOSIZE_STARTADDR_SHIFT
& FIFOSIZE_STARTADDR_MASK
;
282 dwc2_writel(hsotg
, nptxfsiz
, GNPTXFSIZ
);
283 dev_dbg(hsotg
->dev
, "new gnptxfsiz=%08x\n",
284 dwc2_readl(hsotg
, GNPTXFSIZ
));
286 /* Periodic Tx FIFO */
287 dev_dbg(hsotg
->dev
, "initial hptxfsiz=%08x\n",
288 dwc2_readl(hsotg
, HPTXFSIZ
));
289 hptxfsiz
= params
->host_perio_tx_fifo_size
<<
290 FIFOSIZE_DEPTH_SHIFT
& FIFOSIZE_DEPTH_MASK
;
291 hptxfsiz
|= (params
->host_rx_fifo_size
+
292 params
->host_nperio_tx_fifo_size
) <<
293 FIFOSIZE_STARTADDR_SHIFT
& FIFOSIZE_STARTADDR_MASK
;
294 dwc2_writel(hsotg
, hptxfsiz
, HPTXFSIZ
);
295 dev_dbg(hsotg
->dev
, "new hptxfsiz=%08x\n",
296 dwc2_readl(hsotg
, HPTXFSIZ
));
298 if (hsotg
->params
.en_multiple_tx_fifo
&&
299 hsotg
->hw_params
.snpsid
>= DWC2_CORE_REV_2_91a
) {
301 * This feature was implemented in 2.91a version
302 * Global DFIFOCFG calculation for Host mode -
303 * include RxFIFO, NPTXFIFO and HPTXFIFO
305 dfifocfg
= dwc2_readl(hsotg
, GDFIFOCFG
);
306 dfifocfg
&= ~GDFIFOCFG_EPINFOBASE_MASK
;
307 dfifocfg
|= (params
->host_rx_fifo_size
+
308 params
->host_nperio_tx_fifo_size
+
309 params
->host_perio_tx_fifo_size
) <<
310 GDFIFOCFG_EPINFOBASE_SHIFT
&
311 GDFIFOCFG_EPINFOBASE_MASK
;
312 dwc2_writel(hsotg
, dfifocfg
, GDFIFOCFG
);
317 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
318 * the HFIR register according to PHY type and speed
320 * @hsotg: Programming view of DWC_otg controller
322 * NOTE: The caller can modify the value of the HFIR register only after the
323 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
326 u32
dwc2_calc_frame_interval(struct dwc2_hsotg
*hsotg
)
330 int clock
= 60; /* default value */
332 usbcfg
= dwc2_readl(hsotg
, GUSBCFG
);
333 hprt0
= dwc2_readl(hsotg
, HPRT0
);
335 if (!(usbcfg
& GUSBCFG_PHYSEL
) && (usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) &&
336 !(usbcfg
& GUSBCFG_PHYIF16
))
338 if ((usbcfg
& GUSBCFG_PHYSEL
) && hsotg
->hw_params
.fs_phy_type
==
339 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI
)
341 if (!(usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
342 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && (usbcfg
& GUSBCFG_PHYIF16
))
344 if (!(usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
345 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && !(usbcfg
& GUSBCFG_PHYIF16
))
347 if ((usbcfg
& GUSBCFG_PHY_LP_CLK_SEL
) && !(usbcfg
& GUSBCFG_PHYSEL
) &&
348 !(usbcfg
& GUSBCFG_ULPI_UTMI_SEL
) && (usbcfg
& GUSBCFG_PHYIF16
))
350 if ((usbcfg
& GUSBCFG_PHYSEL
) && !(usbcfg
& GUSBCFG_PHYIF16
) &&
351 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_SHARED_UTMI
)
353 if ((usbcfg
& GUSBCFG_PHYSEL
) &&
354 hsotg
->hw_params
.fs_phy_type
== GHWCFG2_FS_PHY_TYPE_DEDICATED
)
357 if ((hprt0
& HPRT0_SPD_MASK
) >> HPRT0_SPD_SHIFT
== HPRT0_SPD_HIGH_SPEED
)
358 /* High speed case */
359 return 125 * clock
- 1;
362 return 1000 * clock
- 1;
366 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
369 * @hsotg: Programming view of DWC_otg controller
370 * @dest: Destination buffer for the packet
371 * @bytes: Number of bytes to copy to the destination
373 void dwc2_read_packet(struct dwc2_hsotg
*hsotg
, u8
*dest
, u16 bytes
)
375 u32
*data_buf
= (u32
*)dest
;
376 int word_count
= (bytes
+ 3) / 4;
380 * Todo: Account for the case where dest is not dword aligned. This
381 * requires reading data from the FIFO into a u32 temp buffer, then
382 * moving it into the data buffer.
385 dev_vdbg(hsotg
->dev
, "%s(%p,%p,%d)\n", __func__
, hsotg
, dest
, bytes
);
387 for (i
= 0; i
< word_count
; i
++, data_buf
++)
388 *data_buf
= dwc2_readl(hsotg
, HCFIFO(0));
/*
 * dwc2_dump_channel_info() - Prints the state of a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Pointer to the channel to dump
 *
 * Must be called with interrupt disabled and spinlock held
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
	int num_channels = hsotg->params.host_channels;
	struct dwc2_qh *qh;
	u32 hcchar;
	u32 hcsplt;
	u32 hctsiz;
	u32 hc_dma;
	int i;

	if (!chan)
		return;

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
	hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));
	hctsiz = dwc2_readl(hsotg, HCTSIZ(chan->hc_num));
	hc_dma = dwc2_readl(hsotg, HCDMA(chan->hc_num));

	dev_dbg(hsotg->dev, "  Assigned to channel %p:\n", chan);
	dev_dbg(hsotg->dev, "    hcchar 0x%08x, hcsplt 0x%08x\n",
		hcchar, hcsplt);
	dev_dbg(hsotg->dev, "    hctsiz 0x%08x, hc_dma 0x%08x\n",
		hctsiz, hc_dma);
	dev_dbg(hsotg->dev, "    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
		chan->dev_addr, chan->ep_num, chan->ep_is_in);
	dev_dbg(hsotg->dev, "    ep_type: %d\n", chan->ep_type);
	dev_dbg(hsotg->dev, "    max_packet: %d\n", chan->max_packet);
	dev_dbg(hsotg->dev, "    data_pid_start: %d\n", chan->data_pid_start);
	dev_dbg(hsotg->dev, "    xfer_started: %d\n", chan->xfer_started);
	dev_dbg(hsotg->dev, "    halt_status: %d\n", chan->halt_status);
	dev_dbg(hsotg->dev, "    xfer_buf: %p\n", chan->xfer_buf);
	dev_dbg(hsotg->dev, "    xfer_dma: %08lx\n",
		(unsigned long)chan->xfer_dma);
	dev_dbg(hsotg->dev, "    xfer_len: %d\n", chan->xfer_len);
	dev_dbg(hsotg->dev, "    qh: %p\n", chan->qh);
	dev_dbg(hsotg->dev, "  NP inactive sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  NP waiting sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  NP active sched:\n");
	list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
			    qh_list_entry)
		dev_dbg(hsotg->dev, "    %p\n", qh);
	dev_dbg(hsotg->dev, "  Channels:\n");
	for (i = 0; i < num_channels; i++) {
		struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];

		dev_dbg(hsotg->dev, "    %2d: %p\n", i, chan);
	}
#endif /* VERBOSE_DEBUG */
}
460 static int _dwc2_hcd_start(struct usb_hcd
*hcd
);
462 static void dwc2_host_start(struct dwc2_hsotg
*hsotg
)
464 struct usb_hcd
*hcd
= dwc2_hsotg_to_hcd(hsotg
);
466 hcd
->self
.is_b_host
= dwc2_hcd_is_b_host(hsotg
);
467 _dwc2_hcd_start(hcd
);
470 static void dwc2_host_disconnect(struct dwc2_hsotg
*hsotg
)
472 struct usb_hcd
*hcd
= dwc2_hsotg_to_hcd(hsotg
);
474 hcd
->self
.is_b_host
= 0;
477 static void dwc2_host_hub_info(struct dwc2_hsotg
*hsotg
, void *context
,
478 int *hub_addr
, int *hub_port
)
480 struct urb
*urb
= context
;
483 *hub_addr
= urb
->dev
->tt
->hub
->devnum
;
486 *hub_port
= urb
->dev
->ttport
;
490 * =========================================================================
491 * Low Level Host Channel Access Functions
492 * =========================================================================
495 static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg
*hsotg
,
496 struct dwc2_host_chan
*chan
)
498 u32 hcintmsk
= HCINTMSK_CHHLTD
;
500 switch (chan
->ep_type
) {
501 case USB_ENDPOINT_XFER_CONTROL
:
502 case USB_ENDPOINT_XFER_BULK
:
503 dev_vdbg(hsotg
->dev
, "control/bulk\n");
504 hcintmsk
|= HCINTMSK_XFERCOMPL
;
505 hcintmsk
|= HCINTMSK_STALL
;
506 hcintmsk
|= HCINTMSK_XACTERR
;
507 hcintmsk
|= HCINTMSK_DATATGLERR
;
508 if (chan
->ep_is_in
) {
509 hcintmsk
|= HCINTMSK_BBLERR
;
511 hcintmsk
|= HCINTMSK_NAK
;
512 hcintmsk
|= HCINTMSK_NYET
;
514 hcintmsk
|= HCINTMSK_ACK
;
517 if (chan
->do_split
) {
518 hcintmsk
|= HCINTMSK_NAK
;
519 if (chan
->complete_split
)
520 hcintmsk
|= HCINTMSK_NYET
;
522 hcintmsk
|= HCINTMSK_ACK
;
525 if (chan
->error_state
)
526 hcintmsk
|= HCINTMSK_ACK
;
529 case USB_ENDPOINT_XFER_INT
:
531 dev_vdbg(hsotg
->dev
, "intr\n");
532 hcintmsk
|= HCINTMSK_XFERCOMPL
;
533 hcintmsk
|= HCINTMSK_NAK
;
534 hcintmsk
|= HCINTMSK_STALL
;
535 hcintmsk
|= HCINTMSK_XACTERR
;
536 hcintmsk
|= HCINTMSK_DATATGLERR
;
537 hcintmsk
|= HCINTMSK_FRMOVRUN
;
540 hcintmsk
|= HCINTMSK_BBLERR
;
541 if (chan
->error_state
)
542 hcintmsk
|= HCINTMSK_ACK
;
543 if (chan
->do_split
) {
544 if (chan
->complete_split
)
545 hcintmsk
|= HCINTMSK_NYET
;
547 hcintmsk
|= HCINTMSK_ACK
;
551 case USB_ENDPOINT_XFER_ISOC
:
553 dev_vdbg(hsotg
->dev
, "isoc\n");
554 hcintmsk
|= HCINTMSK_XFERCOMPL
;
555 hcintmsk
|= HCINTMSK_FRMOVRUN
;
556 hcintmsk
|= HCINTMSK_ACK
;
558 if (chan
->ep_is_in
) {
559 hcintmsk
|= HCINTMSK_XACTERR
;
560 hcintmsk
|= HCINTMSK_BBLERR
;
564 dev_err(hsotg
->dev
, "## Unknown EP type ##\n");
568 dwc2_writel(hsotg
, hcintmsk
, HCINTMSK(chan
->hc_num
));
570 dev_vdbg(hsotg
->dev
, "set HCINTMSK to %08x\n", hcintmsk
);
573 static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg
*hsotg
,
574 struct dwc2_host_chan
*chan
)
576 u32 hcintmsk
= HCINTMSK_CHHLTD
;
579 * For Descriptor DMA mode core halts the channel on AHB error.
580 * Interrupt is not required.
582 if (!hsotg
->params
.dma_desc_enable
) {
584 dev_vdbg(hsotg
->dev
, "desc DMA disabled\n");
585 hcintmsk
|= HCINTMSK_AHBERR
;
588 dev_vdbg(hsotg
->dev
, "desc DMA enabled\n");
589 if (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
590 hcintmsk
|= HCINTMSK_XFERCOMPL
;
593 if (chan
->error_state
&& !chan
->do_split
&&
594 chan
->ep_type
!= USB_ENDPOINT_XFER_ISOC
) {
596 dev_vdbg(hsotg
->dev
, "setting ACK\n");
597 hcintmsk
|= HCINTMSK_ACK
;
598 if (chan
->ep_is_in
) {
599 hcintmsk
|= HCINTMSK_DATATGLERR
;
600 if (chan
->ep_type
!= USB_ENDPOINT_XFER_INT
)
601 hcintmsk
|= HCINTMSK_NAK
;
605 dwc2_writel(hsotg
, hcintmsk
, HCINTMSK(chan
->hc_num
));
607 dev_vdbg(hsotg
->dev
, "set HCINTMSK to %08x\n", hcintmsk
);
610 static void dwc2_hc_enable_ints(struct dwc2_hsotg
*hsotg
,
611 struct dwc2_host_chan
*chan
)
615 if (hsotg
->params
.host_dma
) {
617 dev_vdbg(hsotg
->dev
, "DMA enabled\n");
618 dwc2_hc_enable_dma_ints(hsotg
, chan
);
621 dev_vdbg(hsotg
->dev
, "DMA disabled\n");
622 dwc2_hc_enable_slave_ints(hsotg
, chan
);
625 /* Enable the top level host channel interrupt */
626 intmsk
= dwc2_readl(hsotg
, HAINTMSK
);
627 intmsk
|= 1 << chan
->hc_num
;
628 dwc2_writel(hsotg
, intmsk
, HAINTMSK
);
630 dev_vdbg(hsotg
->dev
, "set HAINTMSK to %08x\n", intmsk
);
632 /* Make sure host channel interrupts are enabled */
633 intmsk
= dwc2_readl(hsotg
, GINTMSK
);
634 intmsk
|= GINTSTS_HCHINT
;
635 dwc2_writel(hsotg
, intmsk
, GINTMSK
);
637 dev_vdbg(hsotg
->dev
, "set GINTMSK to %08x\n", intmsk
);
641 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
642 * a specific endpoint
644 * @hsotg: Programming view of DWC_otg controller
645 * @chan: Information needed to initialize the host channel
647 * The HCCHARn register is set up with the characteristics specified in chan.
648 * Host channel interrupts that may need to be serviced while this transfer is
649 * in progress are enabled.
651 static void dwc2_hc_init(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
653 u8 hc_num
= chan
->hc_num
;
659 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
661 /* Clear old interrupt conditions for this host channel */
662 hcintmsk
= 0xffffffff;
663 hcintmsk
&= ~HCINTMSK_RESERVED14_31
;
664 dwc2_writel(hsotg
, hcintmsk
, HCINT(hc_num
));
666 /* Enable channel interrupts required for this transfer */
667 dwc2_hc_enable_ints(hsotg
, chan
);
670 * Program the HCCHARn register with the endpoint characteristics for
671 * the current transfer
673 hcchar
= chan
->dev_addr
<< HCCHAR_DEVADDR_SHIFT
& HCCHAR_DEVADDR_MASK
;
674 hcchar
|= chan
->ep_num
<< HCCHAR_EPNUM_SHIFT
& HCCHAR_EPNUM_MASK
;
676 hcchar
|= HCCHAR_EPDIR
;
677 if (chan
->speed
== USB_SPEED_LOW
)
678 hcchar
|= HCCHAR_LSPDDEV
;
679 hcchar
|= chan
->ep_type
<< HCCHAR_EPTYPE_SHIFT
& HCCHAR_EPTYPE_MASK
;
680 hcchar
|= chan
->max_packet
<< HCCHAR_MPS_SHIFT
& HCCHAR_MPS_MASK
;
681 dwc2_writel(hsotg
, hcchar
, HCCHAR(hc_num
));
683 dev_vdbg(hsotg
->dev
, "set HCCHAR(%d) to %08x\n",
686 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n",
688 dev_vdbg(hsotg
->dev
, " Dev Addr: %d\n",
690 dev_vdbg(hsotg
->dev
, " Ep Num: %d\n",
692 dev_vdbg(hsotg
->dev
, " Is In: %d\n",
694 dev_vdbg(hsotg
->dev
, " Is Low Speed: %d\n",
695 chan
->speed
== USB_SPEED_LOW
);
696 dev_vdbg(hsotg
->dev
, " Ep Type: %d\n",
698 dev_vdbg(hsotg
->dev
, " Max Pkt: %d\n",
702 /* Program the HCSPLT register for SPLITs */
703 if (chan
->do_split
) {
706 "Programming HC %d with split --> %s\n",
708 chan
->complete_split
? "CSPLIT" : "SSPLIT");
709 if (chan
->complete_split
)
710 hcsplt
|= HCSPLT_COMPSPLT
;
711 hcsplt
|= chan
->xact_pos
<< HCSPLT_XACTPOS_SHIFT
&
713 hcsplt
|= chan
->hub_addr
<< HCSPLT_HUBADDR_SHIFT
&
715 hcsplt
|= chan
->hub_port
<< HCSPLT_PRTADDR_SHIFT
&
718 dev_vdbg(hsotg
->dev
, " comp split %d\n",
719 chan
->complete_split
);
720 dev_vdbg(hsotg
->dev
, " xact pos %d\n",
722 dev_vdbg(hsotg
->dev
, " hub addr %d\n",
724 dev_vdbg(hsotg
->dev
, " hub port %d\n",
726 dev_vdbg(hsotg
->dev
, " is_in %d\n",
728 dev_vdbg(hsotg
->dev
, " Max Pkt %d\n",
730 dev_vdbg(hsotg
->dev
, " xferlen %d\n",
735 dwc2_writel(hsotg
, hcsplt
, HCSPLT(hc_num
));
739 * dwc2_hc_halt() - Attempts to halt a host channel
741 * @hsotg: Controller register interface
742 * @chan: Host channel to halt
743 * @halt_status: Reason for halting the channel
745 * This function should only be called in Slave mode or to abort a transfer in
746 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
747 * controller halts the channel when the transfer is complete or a condition
748 * occurs that requires application intervention.
750 * In slave mode, checks for a free request queue entry, then sets the Channel
751 * Enable and Channel Disable bits of the Host Channel Characteristics
752 * register of the specified channel to intiate the halt. If there is no free
753 * request queue entry, sets only the Channel Disable bit of the HCCHARn
754 * register to flush requests for this channel. In the latter case, sets a
755 * flag to indicate that the host channel needs to be halted when a request
756 * queue slot is open.
758 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
759 * HCCHARn register. The controller ensures there is space in the request
760 * queue before submitting the halt request.
762 * Some time may elapse before the core flushes any posted requests for this
763 * host channel and halts. The Channel Halted interrupt handler completes the
764 * deactivation of the host channel.
766 void dwc2_hc_halt(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
,
767 enum dwc2_halt_status halt_status
)
769 u32 nptxsts
, hptxsts
, hcchar
;
772 dev_vdbg(hsotg
->dev
, "%s()\n", __func__
);
775 * In buffer DMA or external DMA mode channel can't be halted
776 * for non-split periodic channels. At the end of the next
777 * uframe/frame (in the worst case), the core generates a channel
778 * halted and disables the channel automatically.
780 if ((hsotg
->params
.g_dma
&& !hsotg
->params
.g_dma_desc
) ||
781 hsotg
->hw_params
.arch
== GHWCFG2_EXT_DMA_ARCH
) {
782 if (!chan
->do_split
&&
783 (chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
||
784 chan
->ep_type
== USB_ENDPOINT_XFER_INT
)) {
785 dev_err(hsotg
->dev
, "%s() Channel can't be halted\n",
791 if (halt_status
== DWC2_HC_XFER_NO_HALT_STATUS
)
792 dev_err(hsotg
->dev
, "!!! halt_status = %d !!!\n", halt_status
);
794 if (halt_status
== DWC2_HC_XFER_URB_DEQUEUE
||
795 halt_status
== DWC2_HC_XFER_AHB_ERR
) {
797 * Disable all channel interrupts except Ch Halted. The QTD
798 * and QH state associated with this transfer has been cleared
799 * (in the case of URB_DEQUEUE), so the channel needs to be
800 * shut down carefully to prevent crashes.
802 u32 hcintmsk
= HCINTMSK_CHHLTD
;
804 dev_vdbg(hsotg
->dev
, "dequeue/error\n");
805 dwc2_writel(hsotg
, hcintmsk
, HCINTMSK(chan
->hc_num
));
808 * Make sure no other interrupts besides halt are currently
809 * pending. Handling another interrupt could cause a crash due
810 * to the QTD and QH state.
812 dwc2_writel(hsotg
, ~hcintmsk
, HCINT(chan
->hc_num
));
815 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
816 * even if the channel was already halted for some other
819 chan
->halt_status
= halt_status
;
821 hcchar
= dwc2_readl(hsotg
, HCCHAR(chan
->hc_num
));
822 if (!(hcchar
& HCCHAR_CHENA
)) {
824 * The channel is either already halted or it hasn't
825 * started yet. In DMA mode, the transfer may halt if
826 * it finishes normally or a condition occurs that
827 * requires driver intervention. Don't want to halt
828 * the channel again. In either Slave or DMA mode,
829 * it's possible that the transfer has been assigned
830 * to a channel, but not started yet when an URB is
831 * dequeued. Don't want to halt a channel that hasn't
837 if (chan
->halt_pending
) {
839 * A halt has already been issued for this channel. This might
840 * happen when a transfer is aborted by a higher level in
844 "*** %s: Channel %d, chan->halt_pending already set ***\n",
845 __func__
, chan
->hc_num
);
849 hcchar
= dwc2_readl(hsotg
, HCCHAR(chan
->hc_num
));
851 /* No need to set the bit in DDMA for disabling the channel */
852 /* TODO check it everywhere channel is disabled */
853 if (!hsotg
->params
.dma_desc_enable
) {
855 dev_vdbg(hsotg
->dev
, "desc DMA disabled\n");
856 hcchar
|= HCCHAR_CHENA
;
859 dev_dbg(hsotg
->dev
, "desc DMA enabled\n");
861 hcchar
|= HCCHAR_CHDIS
;
863 if (!hsotg
->params
.host_dma
) {
865 dev_vdbg(hsotg
->dev
, "DMA not enabled\n");
866 hcchar
|= HCCHAR_CHENA
;
868 /* Check for space in the request queue to issue the halt */
869 if (chan
->ep_type
== USB_ENDPOINT_XFER_CONTROL
||
870 chan
->ep_type
== USB_ENDPOINT_XFER_BULK
) {
871 dev_vdbg(hsotg
->dev
, "control/bulk\n");
872 nptxsts
= dwc2_readl(hsotg
, GNPTXSTS
);
873 if ((nptxsts
& TXSTS_QSPCAVAIL_MASK
) == 0) {
874 dev_vdbg(hsotg
->dev
, "Disabling channel\n");
875 hcchar
&= ~HCCHAR_CHENA
;
879 dev_vdbg(hsotg
->dev
, "isoc/intr\n");
880 hptxsts
= dwc2_readl(hsotg
, HPTXSTS
);
881 if ((hptxsts
& TXSTS_QSPCAVAIL_MASK
) == 0 ||
882 hsotg
->queuing_high_bandwidth
) {
884 dev_vdbg(hsotg
->dev
, "Disabling channel\n");
885 hcchar
&= ~HCCHAR_CHENA
;
890 dev_vdbg(hsotg
->dev
, "DMA enabled\n");
893 dwc2_writel(hsotg
, hcchar
, HCCHAR(chan
->hc_num
));
894 chan
->halt_status
= halt_status
;
896 if (hcchar
& HCCHAR_CHENA
) {
898 dev_vdbg(hsotg
->dev
, "Channel enabled\n");
899 chan
->halt_pending
= 1;
900 chan
->halt_on_queue
= 0;
903 dev_vdbg(hsotg
->dev
, "Channel disabled\n");
904 chan
->halt_on_queue
= 1;
908 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
910 dev_vdbg(hsotg
->dev
, " hcchar: 0x%08x\n",
912 dev_vdbg(hsotg
->dev
, " halt_pending: %d\n",
914 dev_vdbg(hsotg
->dev
, " halt_on_queue: %d\n",
915 chan
->halt_on_queue
);
916 dev_vdbg(hsotg
->dev
, " halt_status: %d\n",
922 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
924 * @hsotg: Programming view of DWC_otg controller
925 * @chan: Identifies the host channel to clean up
927 * This function is normally called after a transfer is done and the host
928 * channel is being released
930 void dwc2_hc_cleanup(struct dwc2_hsotg
*hsotg
, struct dwc2_host_chan
*chan
)
934 chan
->xfer_started
= 0;
936 list_del_init(&chan
->split_order_list_entry
);
939 * Clear channel interrupt enables and any unhandled channel interrupt
942 dwc2_writel(hsotg
, 0, HCINTMSK(chan
->hc_num
));
943 hcintmsk
= 0xffffffff;
944 hcintmsk
&= ~HCINTMSK_RESERVED14_31
;
945 dwc2_writel(hsotg
, hcintmsk
, HCINT(chan
->hc_num
));
949 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
950 * which frame a periodic transfer should occur
952 * @hsotg: Programming view of DWC_otg controller
953 * @chan: Identifies the host channel to set up and its properties
954 * @hcchar: Current value of the HCCHAR register for the specified host channel
956 * This function has no effect on non-periodic transfers
958 static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg
*hsotg
,
959 struct dwc2_host_chan
*chan
, u32
*hcchar
)
961 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
962 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
972 * Try to figure out if we're an even or odd frame. If we set
973 * even and the current frame number is even the transfer
974 * will happen immediately. Similar if both are odd. If one is
975 * even and the other is odd then the transfer will happen when
976 * the frame number ticks.
978 * There's a bit of a balancing act to get this right.
979 * Sometimes we may want to send data in the current frame (AK
980 * right away). We might want to do this if the frame number
981 * _just_ ticked, but we might also want to do this in order
982 * to continue a split transaction that happened late in a
983 * microframe (so we didn't know to queue the next transfer
984 * until the frame number had ticked). The problem is that we
985 * need a lot of knowledge to know if there's actually still
986 * time to send things or if it would be better to wait until
989 * We can look at how much time is left in the current frame
990 * and make a guess about whether we'll have time to transfer.
994 /* Get speed host is running at */
995 host_speed
= (chan
->speed
!= USB_SPEED_HIGH
&&
996 !chan
->do_split
) ? chan
->speed
: USB_SPEED_HIGH
;
998 /* See how many bytes are in the periodic FIFO right now */
999 fifo_space
= (dwc2_readl(hsotg
, HPTXSTS
) &
1000 TXSTS_FSPCAVAIL_MASK
) >> TXSTS_FSPCAVAIL_SHIFT
;
1001 bytes_in_fifo
= sizeof(u32
) *
1002 (hsotg
->params
.host_perio_tx_fifo_size
-
1006 * Roughly estimate bus time for everything in the periodic
1007 * queue + our new transfer. This is "rough" because we're
1008 * using a function that makes takes into account IN/OUT
1009 * and INT/ISO and we're just slamming in one value for all
1010 * transfers. This should be an over-estimate and that should
1011 * be OK, but we can probably tighten it.
1013 xfer_ns
= usb_calc_bus_time(host_speed
, false, false,
1014 chan
->xfer_len
+ bytes_in_fifo
);
1015 xfer_us
= NS_TO_US(xfer_ns
);
1017 /* See what frame number we'll be at by the time we finish */
1018 frame_number
= dwc2_hcd_get_future_frame_number(hsotg
, xfer_us
);
1020 /* This is when we were scheduled to be on the wire */
1021 wire_frame
= dwc2_frame_num_inc(chan
->qh
->next_active_frame
, 1);
1024 * If we'd finish _after_ the frame we're scheduled in then
1025 * it's hopeless. Just schedule right away and hope for the
1026 * best. Note that it _might_ be wise to call back into the
1027 * scheduler to pick a better frame, but this is better than
1030 if (dwc2_frame_num_gt(frame_number
, wire_frame
)) {
1031 dwc2_sch_vdbg(hsotg
,
1032 "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
1033 chan
->qh
, wire_frame
, frame_number
,
1034 dwc2_frame_num_dec(frame_number
,
1036 wire_frame
= frame_number
;
1039 * We picked a different frame number; communicate this
1040 * back to the scheduler so it doesn't try to schedule
1041 * another in the same frame.
1043 * Remember that next_active_frame is 1 before the wire
1046 chan
->qh
->next_active_frame
=
1047 dwc2_frame_num_dec(frame_number
, 1);
1051 *hcchar
|= HCCHAR_ODDFRM
;
1053 *hcchar
&= ~HCCHAR_ODDFRM
;
1057 static void dwc2_set_pid_isoc(struct dwc2_host_chan
*chan
)
1059 /* Set up the initial PID for the transfer */
1060 if (chan
->speed
== USB_SPEED_HIGH
) {
1061 if (chan
->ep_is_in
) {
1062 if (chan
->multi_count
== 1)
1063 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1064 else if (chan
->multi_count
== 2)
1065 chan
->data_pid_start
= DWC2_HC_PID_DATA1
;
1067 chan
->data_pid_start
= DWC2_HC_PID_DATA2
;
1069 if (chan
->multi_count
== 1)
1070 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
1072 chan
->data_pid_start
= DWC2_HC_PID_MDATA
;
1075 chan
->data_pid_start
= DWC2_HC_PID_DATA0
;
/**
 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
 * the Host Channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. For a channel associated
 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
 * associated with a periodic EP, the periodic Tx FIFO is written.
 *
 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
 * the number of bytes written to the Tx FIFO.
 */
static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 i;
	u32 remaining_count;
	u32 byte_count;
	u32 dword_count;
	u32 *data_buf = (u32 *)chan->xfer_buf;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Push at most one max-packet payload per call */
	remaining_count = chan->xfer_len - chan->xfer_count;
	if (remaining_count > chan->max_packet)
		byte_count = chan->max_packet;
	else
		byte_count = remaining_count;

	/* FIFO pushes are whole 32-bit words; round the byte count up */
	dword_count = (byte_count + 3) / 4;

	if (((unsigned long)data_buf & 0x3) == 0) {
		/* xfer_buf is DWORD aligned */
		for (i = 0; i < dword_count; i++, data_buf++)
			dwc2_writel(hsotg, *data_buf, HCFIFO(chan->hc_num));
	} else {
		/*
		 * xfer_buf is not DWORD aligned
		 *
		 * NOTE(review): data_buf is a u32 *, so data_buf[0..3] below
		 * are 32-bit loads, not byte loads — confirm this assembly
		 * is really the intended handling of unaligned buffers.
		 */
		for (i = 0; i < dword_count; i++, data_buf++) {
			u32 data = data_buf[0] | data_buf[1] << 8 |
				   data_buf[2] << 16 | data_buf[3] << 24;
			dwc2_writel(hsotg, data, HCFIFO(chan->hc_num));
		}
	}

	/* Advance the bookkeeping by the bytes actually pushed */
	chan->xfer_count += byte_count;
	chan->xfer_buf += byte_count;
}
1131 * dwc2_hc_do_ping() - Starts a PING transfer
1133 * @hsotg: Programming view of DWC_otg controller
1134 * @chan: Information needed to initialize the host channel
1136 * This function should only be called in Slave mode. The Do Ping bit is set in
1137 * the HCTSIZ register, then the channel is enabled.
1139 static void dwc2_hc_do_ping(struct dwc2_hsotg
*hsotg
,
1140 struct dwc2_host_chan
*chan
)
1146 dev_vdbg(hsotg
->dev
, "%s: Channel %d\n", __func__
,
1149 hctsiz
= TSIZ_DOPNG
;
1150 hctsiz
|= 1 << TSIZ_PKTCNT_SHIFT
;
1151 dwc2_writel(hsotg
, hctsiz
, HCTSIZ(chan
->hc_num
));
1153 hcchar
= dwc2_readl(hsotg
, HCCHAR(chan
->hc_num
));
1154 hcchar
|= HCCHAR_CHENA
;
1155 hcchar
&= ~HCCHAR_CHDIS
;
1156 dwc2_writel(hsotg
, hcchar
, HCCHAR(chan
->hc_num
));
/**
 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
 * channel and starts the transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel. The xfer_len value
 *        may be reduced to accommodate the max widths of the XferSize and
 *        PktCnt fields in the HCTSIZn register. The multi_count value may be
 *        changed to reflect the final xfer_len value.
 *
 * This function may be called in either Slave mode or DMA mode. In Slave mode,
 * the caller must ensure that there is sufficient space in the request queue
 * and Tx Data FIFO.
 *
 * For an OUT transfer in Slave mode, it loads a data packet into the
 * appropriate FIFO. If necessary, additional data packets are loaded in the
 * Host ISR.
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 */
static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
	u16 max_hc_pkt_count = hsotg->params.max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;
	u32 ec_mc;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (!hsotg->params.host_dma) {
			/* Slave mode: a single PING token, nothing else */
			dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		}

		dev_vdbg(hsotg->dev, "ping, DMA\n");

		/* DMA mode: core runs the PING protocol then the transfer */
		hctsiz |= TSIZ_DOPNG;
	}

	if (chan->do_split) {
		dev_vdbg(hsotg->dev, "split\n");
		/* Splits carry at most one packet per start */
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/* SSPLIT OUT payloads are capped at 188 bytes */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* For split set ec_mc for immediate retries */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			ec_mc = 3;
		else
			ec_mc = 1;
	} else {
		dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				/* Clamp to the PktCnt field width */
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets *
						 chan->max_packet;
			} else if (chan->ep_is_in) {
				/*
				 * Always program an integral # of max packets
				 * for IN transfers.
				 *
				 * Note: This assumes that the input buffer is
				 * aligned and sized accordingly.
				 */
				chan->xfer_len = num_packets *
						 chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;

		/* The ec_mc gets the multi_count for non-split */
		ec_mc = chan->multi_count;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));

	dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
		 hctsiz, chan->hc_num);

	dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
		 chan->hc_num);
	dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
		 (hctsiz & TSIZ_XFERSIZE_MASK) >>
		 TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
		 (hctsiz & TSIZ_PKTCNT_MASK) >>
		 TSIZ_PKTCNT_SHIFT);
	dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
		 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
		 TSIZ_SC_MC_PID_SHIFT);

	if (hsotg->params.host_dma) {
		dma_addr_t dma_addr;

		if (chan->align_buf) {
			/* Bounce buffer in use for this transfer */
			dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		dwc2_writel(hsotg, (u32)dma_addr, HCDMA(chan->hc_num));

		dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
			 (unsigned long)dma_addr, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = dwc2_readl(hsotg, HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		dwc2_writel(hsotg, hcsplt, HCSPLT(chan->hc_num));
	}

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
		 (hcchar & HCCHAR_MULTICNT_MASK) >>
		 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));

	dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
		 chan->hc_num);

	chan->xfer_started = 1;

	if (!hsotg->params.host_dma &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
		 chan->hc_num);
	dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
		 chan->data_pid_start);
	dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);

	dwc2_writel(hsotg, hctsiz, HCTSIZ(chan->hc_num));

	/* Make the descriptor list visible to the controller before DMA */
	dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
				   chan->desc_list_sz, DMA_TO_DEVICE);

	dwc2_writel(hsotg, chan->desc_list_addr, HCDMA(chan->hc_num));

	dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
		 &chan->desc_list_addr, chan->hc_num);

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
		 (hcchar & HCCHAR_MULTICNT_MASK) >>
		 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));

	dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
		 chan->hc_num);

	chan->xfer_started = 1;
}
/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 */
static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
				     struct dwc2_host_chan *chan)
{
	dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
		 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
			 hcchar);
		dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));

		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = dwc2_readl(hsotg,
						HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);

		return 1;
	}

	return 0;
}
1559 * =========================================================================
1561 * =========================================================================
/*
 * Processes all the URBs in a single list of QHs. Completes them with
 * -ECONNRESET and frees the QTD.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
				      struct list_head *qh_list)
{
	struct dwc2_qh *qh, *qh_tmp;
	struct dwc2_qtd *qtd, *qtd_tmp;

	list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			/* Give the URB back with -ECONNRESET, then free */
			dwc2_host_complete(hsotg, qtd, -ECONNRESET);
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}
	}
}
/*
 * Releases every QH on @qh_list along with all of its QTDs.
 *
 * Safe to call before the list is initialized (early init failure paths);
 * takes and releases hsotg->lock itself, dropping it around
 * dwc2_hcd_qh_free() since that call may sleep.
 */
static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
			      struct list_head *qh_list)
{
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh, *qh_tmp;
	unsigned long flags;

	if (!qh_list->next)
		/* The list hasn't been initialized yet */
		return;

	spin_lock_irqsave(&hsotg->lock, flags);

	/* Ensure there are no QTDs or URBs left */
	dwc2_kill_urbs_in_qh_list(hsotg, qh_list);

	list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
		dwc2_hcd_qh_unlink(hsotg, qh);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/* Break the channel's back-pointer before freeing the QH */
		if (qh->channel && qh->channel->qh == qh)
			qh->channel->qh = NULL;

		/* Drop the lock across qh_free, which may sleep */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh);
		spin_lock_irqsave(&hsotg->lock, flags);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
1621 * Responds with an error status of -ETIMEDOUT to all URBs in the non-periodic
1622 * and periodic schedules. The QTD associated with each URB is removed from
1623 * the schedule and freed. This function may be called when a disconnect is
1624 * detected or when the HCD is being stopped.
1626 * Must be called with interrupt disabled and spinlock held
1628 static void dwc2_kill_all_urbs(struct dwc2_hsotg
*hsotg
)
1630 dwc2_kill_urbs_in_qh_list(hsotg
, &hsotg
->non_periodic_sched_inactive
);
1631 dwc2_kill_urbs_in_qh_list(hsotg
, &hsotg
->non_periodic_sched_waiting
);
1632 dwc2_kill_urbs_in_qh_list(hsotg
, &hsotg
->non_periodic_sched_active
);
1633 dwc2_kill_urbs_in_qh_list(hsotg
, &hsotg
->periodic_sched_inactive
);
1634 dwc2_kill_urbs_in_qh_list(hsotg
, &hsotg
->periodic_sched_ready
);
1635 dwc2_kill_urbs_in_qh_list(hsotg
, &hsotg
->periodic_sched_assigned
);
1636 dwc2_kill_urbs_in_qh_list(hsotg
, &hsotg
->periodic_sched_queued
);
/**
 * dwc2_hcd_start() - Starts the HCD when switching to Host mode
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 */
void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;

	if (hsotg->op_state == OTG_STATE_B_HOST) {
		/*
		 * Reset the port. During a HNP mode switch the reset
		 * needs to occur within 1ms and have a duration of at
		 * least 50ms.
		 */
		hprt0 = dwc2_read_hprt0(hsotg);
		hprt0 |= HPRT0_RST;
		dwc2_writel(hsotg, hprt0, HPRT0);
	}

	/* Finish start-up (and release any HNP reset) after 50ms */
	queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
			   msecs_to_jiffies(50));
}
/* Must be called with interrupt disabled and spinlock held */
static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
	int num_channels = hsotg->params.host_channels;
	struct dwc2_host_chan *channel;
	u32 hcchar;
	int i;

	if (!hsotg->params.host_dma) {
		/* Flush out any channel requests in slave mode */
		for (i = 0; i < num_channels; i++) {
			channel = hsotg->hc_ptr_array[i];
			/* Channels on the free list are not in use */
			if (!list_empty(&channel->hc_list_entry))
				continue;
			hcchar = dwc2_readl(hsotg, HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
				hcchar |= HCCHAR_CHDIS;
				dwc2_writel(hsotg, hcchar, HCCHAR(i));
			}
		}
	}

	for (i = 0; i < num_channels; i++) {
		channel = hsotg->hc_ptr_array[i];
		if (!list_empty(&channel->hc_list_entry))
			continue;
		hcchar = dwc2_readl(hsotg, HCCHAR(i));
		if (hcchar & HCCHAR_CHENA) {
			/* Halt the channel */
			hcchar |= HCCHAR_CHDIS;
			dwc2_writel(hsotg, hcchar, HCCHAR(i));
		}

		dwc2_hc_cleanup(hsotg, channel);
		list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
		/*
		 * Added for Descriptor DMA to prevent channel double cleanup in
		 * release_channel_ddma(), which is called from ep_disable when
		 * device disconnects
		 */
		channel->qh = NULL;
	}
	/* All channels have been freed, mark them available */
	if (hsotg->params.uframe_sched) {
		hsotg->available_host_channels =
			hsotg->params.host_channels;
	} else {
		hsotg->non_periodic_channels = 0;
		hsotg->periodic_channels = 0;
	}
}
/**
 * dwc2_hcd_connect() - Handles connect of the HCD
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
{
	/* Wake the root hub if the bus is not in the fully-on state */
	if (hsotg->lx_state != DWC2_L0)
		usb_hcd_resume_root_hub(hsotg->priv);

	/* Flags picked up by the hub driver on the next port status read */
	hsotg->flags.b.port_connect_status_change = 1;
	hsotg->flags.b.port_connect_status = 1;
}
/**
 * dwc2_hcd_disconnect() - Handles disconnect of the HCD
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 * @force: If true, we won't try to reconnect even if we see device connected.
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
{
	u32 intr;
	u32 hprt0;

	/* Set status flags for the hub driver */
	hsotg->flags.b.port_connect_status_change = 1;
	hsotg->flags.b.port_connect_status = 0;

	/*
	 * Shutdown any transfers in process by clearing the Tx FIFO Empty
	 * interrupt mask and status bits and disabling subsequent host
	 * channel interrupts.
	 */
	intr = dwc2_readl(hsotg, GINTMSK);
	intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
	dwc2_writel(hsotg, intr, GINTMSK);
	intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
	dwc2_writel(hsotg, intr, GINTSTS);

	/*
	 * Turn off the vbus power only if the core has transitioned to device
	 * mode. If still in host mode, need to keep power on to detect a
	 * reconnection.
	 */
	if (dwc2_is_device_mode(hsotg)) {
		if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
			dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
			dwc2_writel(hsotg, 0, HPRT0);
		}

		dwc2_disable_host_interrupts(hsotg);
	}

	/* Respond with an error status to all URBs in the schedule */
	dwc2_kill_all_urbs(hsotg);

	if (dwc2_is_host_mode(hsotg))
		/* Clean up any host channels that were in use */
		dwc2_hcd_cleanup_channels(hsotg);

	dwc2_host_disconnect(hsotg);

	/*
	 * Add an extra check here to see if we're actually connected but
	 * we don't have a detection interrupt pending. This can happen if:
	 * 1. hardware sees connect
	 * 2. hardware sees disconnect
	 * 3. hardware sees connect
	 * 4. dwc2_port_intr() - clears connect interrupt
	 * 5. dwc2_handle_common_intr() - calls here
	 *
	 * Without the extra check here we will end calling disconnect
	 * and won't get any future interrupts to handle the connect.
	 */
	if (!force) {
		hprt0 = dwc2_readl(hsotg, HPRT0);
		if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
			dwc2_hcd_connect(hsotg);
	}
}
/**
 * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 */
static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
{
	if (hsotg->bus_suspended) {
		/* Flag a suspend-state change and kick the root hub awake */
		hsotg->flags.b.port_suspend_change = 1;
		usb_hcd_resume_root_hub(hsotg->priv);
	}

	/* Record an L1 (LPM sleep) exit for the hub driver */
	if (hsotg->lx_state == DWC2_L1)
		hsotg->flags.b.port_l1_change = 1;
}
/**
 * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");

	/*
	 * The root hub should be disconnected before this function is called.
	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
	 * and the QH lists (via ..._hcd_endpoint_disable).
	 */

	/* Turn off all host-specific interrupts */
	dwc2_disable_host_interrupts(hsotg);

	/* Turn off the vbus power */
	dev_dbg(hsotg->dev, "PortPower off\n");
	dwc2_writel(hsotg, 0, HPRT0);
}
/*
 * Queues one URB (as a QTD) on the given QH and, if the SOF interrupt is not
 * already driving the schedule, kicks off transaction processing immediately.
 *
 * Caller must hold driver lock.
 */
static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
				struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
				struct dwc2_qtd *qtd)
{
	u32 intr_mask;
	int retval;
	int dev_speed;

	if (!hsotg->flags.b.port_connect_status) {
		/* No longer connected */
		dev_err(hsotg->dev, "Not connected\n");
		return -ENODEV;
	}

	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	/* Some configurations cannot support LS traffic on a FS root port */
	if ((dev_speed == USB_SPEED_LOW) &&
	    (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
	    (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
		u32 hprt0 = dwc2_readl(hsotg, HPRT0);
		u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

		if (prtspd == HPRT0_SPD_FULL_SPEED)
			return -ENODEV;
	}

	if (!qtd)
		return -EINVAL;

	dwc2_hcd_qtd_init(qtd, urb);
	retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
	if (retval) {
		dev_err(hsotg->dev,
			"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
			retval);
		return retval;
	}

	/* If SOF interrupts are masked, the schedule isn't running; kick it */
	intr_mask = dwc2_readl(hsotg, GINTMSK);
	if (!(intr_mask & GINTSTS_SOF)) {
		enum dwc2_transaction_type tr_type;

		if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
		    !(qtd->urb->flags & URB_GIVEBACK_ASAP))
			/*
			 * Do not schedule SG transactions until qtd has
			 * URB_GIVEBACK_ASAP set
			 */
			return 0;

		tr_type = dwc2_hcd_select_transactions(hsotg);
		if (tr_type != DWC2_TRANSACTION_NONE)
			dwc2_hcd_queue_transactions(hsotg, tr_type);
	}

	return 0;
}
/*
 * Removes a single URB from the schedule, halting its channel if the
 * transfer is already in flight.
 *
 * Must be called with interrupt disabled and spinlock held.
 */
static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
				struct dwc2_hcd_urb *urb)
{
	struct dwc2_qh *qh;
	struct dwc2_qtd *urb_qtd;

	urb_qtd = urb->qtd;
	if (!urb_qtd) {
		dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
		return -EINVAL;
	}

	qh = urb_qtd->qh;
	if (!qh) {
		dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
		return -EINVAL;
	}

	if (urb_qtd->in_process && qh->channel) {
		dwc2_dump_channel_info(hsotg, qh->channel);

		/* The QTD is in process (it has been assigned to a channel) */
		if (hsotg->flags.b.port_connect_status)
			/*
			 * If still connected (i.e. in host mode), halt the
			 * channel so it can be used for other transfers. If
			 * no longer connected, the host registers can't be
			 * written to halt the channel since the core is in
			 * device mode.
			 */
			dwc2_hc_halt(hsotg, qh->channel,
				     DWC2_HC_XFER_URB_DEQUEUE);
	}

	/*
	 * Free the QTD and clean up the associated QH. Leave the QH in the
	 * schedule if it has any remaining QTDs.
	 */
	if (!hsotg->params.dma_desc_enable) {
		u8 in_process = urb_qtd->in_process;

		dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
		if (in_process) {
			dwc2_hcd_qh_deactivate(hsotg, qh, 0);
			qh->channel = NULL;
		} else if (list_empty(&qh->qtd_list)) {
			dwc2_hcd_qh_unlink(hsotg, qh);
		}
	} else {
		dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
	}

	return 0;
}
/*
 * Waits (up to @retry polls) for an endpoint's pending QTDs to drain, then
 * unlinks and frees the QH.
 *
 * Must NOT be called with interrupt disabled or spinlock held: it sleeps
 * between polls with the lock dropped.
 */
static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
				     struct usb_host_endpoint *ep, int retry)
{
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&hsotg->lock, flags);

	qh = ep->hcpriv;
	if (!qh) {
		rc = -EINVAL;
		goto err;
	}

	while (!list_empty(&qh->qtd_list) && retry--) {
		if (retry == 0) {
			dev_err(hsotg->dev,
				"## timeout in dwc2_hcd_endpoint_disable() ##\n");
			rc = -EBUSY;
			goto err;
		}

		/* Drop the lock while waiting for the QTD list to drain */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		msleep(20);
		spin_lock_irqsave(&hsotg->lock, flags);
		/* QH may have been freed while the lock was dropped */
		qh = ep->hcpriv;
		if (!qh) {
			rc = -EINVAL;
			goto err;
		}
	}

	dwc2_hcd_qh_unlink(hsotg, qh);

	/* Free each QTD in the QH's QTD list */
	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

	ep->hcpriv = NULL;

	/* Break the channel's back-pointer before freeing the QH */
	if (qh->channel && qh->channel->qh == qh)
		qh->channel->qh = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	dwc2_hcd_qh_free(hsotg, qh);

	return 0;

err:
	ep->hcpriv = NULL;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return rc;
}
/*
 * Resets the endpoint's data toggle back to DATA0 (e.g. after a
 * ClearFeature(ENDPOINT_HALT)).
 *
 * Must be called with interrupt disabled and spinlock held.
 */
static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
				   struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	if (!qh)
		return -EINVAL;

	qh->data_toggle = DWC2_HC_PID_DATA0;

	return 0;
}
/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg:         Programming view of the DWC_otg controller
 * @initial_setup: If true then this is the first init for this instance.
 *
 * Return: 0 on success, a negative error code from reset/PHY/AHB init
 * otherwise.
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = dwc2_readl(hsotg, GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->params.phy_ulpi_ext_vbus)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->params.ts_dline)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	/*
	 * Reset the Controller
	 *
	 * We only need to reset the controller if this is a re-init.
	 * For the first init we know for sure that earlier code reset us (it
	 * needed to in order to properly detect various parameters).
	 */
	if (!initial_setup) {
		retval = dwc2_core_reset(hsotg, false);
		if (retval) {
			dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
				__func__);
			return retval;
		}
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, initial_setup);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
	otgctl = dwc2_readl(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	dwc2_writel(hsotg, otgctl, GOTGCTL);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}
/**
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 * request queues. Host channels are reset to ensure that they are ready for
 * performing transfers.
 */
static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl, usbcfg;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Set HS/FS Timeout Calibration to 7 (max available value).
	 * The number of PHY clocks that the application programs in
	 * this field is added to the high/full speed interpacket timeout
	 * duration in the core to account for any additional delays
	 * introduced by the PHY. This can be required, because the delay
	 * introduced by the PHY in generating the linestate condition
	 * can vary from one PHY to another.
	 */
	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	usbcfg |= GUSBCFG_TOUTCAL(7);
	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	/* Restart the Phy Clock */
	dwc2_writel(hsotg, 0, PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
	    hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
		hcfg = dwc2_readl(hsotg, HCFG);
		hcfg |= HCFG_FSLSSUPP;
		dwc2_writel(hsotg, hcfg, HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->params.reload_ctl) {
		hfir = dwc2_readl(hsotg, HFIR);
		hfir |= HFIR_RLDCTRL;
		dwc2_writel(hsotg, hfir, HFIR);
	}

	if (hsotg->params.dma_desc_enable) {
		u32 op_mode = hsotg->hw_params.op_mode;

		/* Descriptor DMA needs rev >= 2.90a and a host-capable core */
		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->params.dma_desc_enable = false;
		} else {
			hcfg = dwc2_readl(hsotg, HCFG);
			hcfg |= HCFG_DESCDMA;
			dwc2_writel(hsotg, hcfg, HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(hsotg, otgctl, GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(hsotg, otgctl, GOTGCTL);

	if (!hsotg->params.dma_desc_enable) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->params.host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg, HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				hcchar &= ~HCCHAR_CHENA;
				hcchar |= HCCHAR_CHDIS;
				hcchar &= ~HCCHAR_EPDIR;
				dwc2_writel(hsotg, hcchar, HCCHAR(i));
			}
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg, HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				/* Enable + disable together requests a halt */
				hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
				hcchar &= ~HCCHAR_EPDIR;
				dwc2_writel(hsotg, hcchar, HCCHAR(i));
				dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
					__func__, i);

				if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
							      HCCHAR_CHENA,
							      1000)) {
					dev_warn(hsotg->dev,
						 "Unable to clear enable on channel %d\n",
						 i);
				}
			}
		}
	}

	/* Enable ACG feature in host mode, if supported */
	dwc2_enable_acg(hsotg);

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			dwc2_writel(hsotg, hprt0, HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}
/*
 * Initializes dynamic portions of the DWC_otg HCD state
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
{
	struct dwc2_host_chan *chan, *chan_tmp;
	int num_channels;
	int i;

	hsotg->flags.d32 = 0;
	hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;

	if (hsotg->params.uframe_sched) {
		hsotg->available_host_channels =
			hsotg->params.host_channels;
	} else {
		hsotg->non_periodic_channels = 0;
		hsotg->periodic_channels = 0;
	}

	/*
	 * Put all channels in the free channel list and clean up channel
	 * states
	 */
	list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
				 hc_list_entry)
		list_del_init(&chan->hc_list_entry);

	num_channels = hsotg->params.host_channels;
	for (i = 0; i < num_channels; i++) {
		chan = hsotg->hc_ptr_array[i];
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		dwc2_hc_cleanup(hsotg, chan);
	}

	/* Initialize the DWC core for host mode operation */
	dwc2_core_host_init(hsotg);
}
2305 static void dwc2_hc_init_split(struct dwc2_hsotg
*hsotg
,
2306 struct dwc2_host_chan
*chan
,
2307 struct dwc2_qtd
*qtd
, struct dwc2_hcd_urb
*urb
)
2309 int hub_addr
, hub_port
;
2312 chan
->xact_pos
= qtd
->isoc_split_pos
;
2313 chan
->complete_split
= qtd
->complete_split
;
2314 dwc2_host_hub_info(hsotg
, urb
->priv
, &hub_addr
, &hub_port
);
2315 chan
->hub_addr
= (u8
)hub_addr
;
2316 chan
->hub_port
= (u8
)hub_port
;
2319 static void dwc2_hc_init_xfer(struct dwc2_hsotg
*hsotg
,
2320 struct dwc2_host_chan
*chan
,
2321 struct dwc2_qtd
*qtd
)
2323 struct dwc2_hcd_urb
*urb
= qtd
->urb
;
2324 struct dwc2_hcd_iso_packet_desc
*frame_desc
;
2326 switch (dwc2_hcd_get_pipe_type(&urb
->pipe_info
)) {
2327 case USB_ENDPOINT_XFER_CONTROL
:
2328 chan
->ep_type
= USB_ENDPOINT_XFER_CONTROL
;
2330 switch (qtd
->control_phase
) {
2331 case DWC2_CONTROL_SETUP
:
2332 dev_vdbg(hsotg
->dev
, " Control setup transaction\n");
2335 chan
->data_pid_start
= DWC2_HC_PID_SETUP
;
2336 if (hsotg
->params
.host_dma
)
2337 chan
->xfer_dma
= urb
->setup_dma
;
2339 chan
->xfer_buf
= urb
->setup_packet
;
2343 case DWC2_CONTROL_DATA
:
2344 dev_vdbg(hsotg
->dev
, " Control data transaction\n");
2345 chan
->data_pid_start
= qtd
->data_toggle
;
2348 case DWC2_CONTROL_STATUS
:
2350 * Direction is opposite of data direction or IN if no
2353 dev_vdbg(hsotg
->dev
, " Control status transaction\n");
2354 if (urb
->length
== 0)
2358 dwc2_hcd_is_pipe_out(&urb
->pipe_info
);
2361 chan
->data_pid_start
= DWC2_HC_PID_DATA1
;
2363 if (hsotg
->params
.host_dma
)
2364 chan
->xfer_dma
= hsotg
->status_buf_dma
;
2366 chan
->xfer_buf
= hsotg
->status_buf
;
2371 case USB_ENDPOINT_XFER_BULK
:
2372 chan
->ep_type
= USB_ENDPOINT_XFER_BULK
;
2375 case USB_ENDPOINT_XFER_INT
:
2376 chan
->ep_type
= USB_ENDPOINT_XFER_INT
;
2379 case USB_ENDPOINT_XFER_ISOC
:
2380 chan
->ep_type
= USB_ENDPOINT_XFER_ISOC
;
2381 if (hsotg
->params
.dma_desc_enable
)
2384 frame_desc
= &urb
->iso_descs
[qtd
->isoc_frame_index
];
2385 frame_desc
->status
= 0;
2387 if (hsotg
->params
.host_dma
) {
2388 chan
->xfer_dma
= urb
->dma
;
2389 chan
->xfer_dma
+= frame_desc
->offset
+
2390 qtd
->isoc_split_offset
;
2392 chan
->xfer_buf
= urb
->buf
;
2393 chan
->xfer_buf
+= frame_desc
->offset
+
2394 qtd
->isoc_split_offset
;
2397 chan
->xfer_len
= frame_desc
->length
- qtd
->isoc_split_offset
;
2399 if (chan
->xact_pos
== DWC2_HCSPLT_XACTPOS_ALL
) {
2400 if (chan
->xfer_len
<= 188)
2401 chan
->xact_pos
= DWC2_HCSPLT_XACTPOS_ALL
;
2403 chan
->xact_pos
= DWC2_HCSPLT_XACTPOS_BEGIN
;
2409 static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg
*hsotg
,
2411 struct dwc2_host_chan
*chan
)
2413 if (!hsotg
->unaligned_cache
||
2414 chan
->max_packet
> DWC2_KMEM_UNALIGNED_BUF_SIZE
)
2417 if (!qh
->dw_align_buf
) {
2418 qh
->dw_align_buf
= kmem_cache_alloc(hsotg
->unaligned_cache
,
2419 GFP_ATOMIC
| GFP_DMA
);
2420 if (!qh
->dw_align_buf
)
2424 qh
->dw_align_buf_dma
= dma_map_single(hsotg
->dev
, qh
->dw_align_buf
,
2425 DWC2_KMEM_UNALIGNED_BUF_SIZE
,
2428 if (dma_mapping_error(hsotg
->dev
, qh
->dw_align_buf_dma
)) {
2429 dev_err(hsotg
->dev
, "can't map align_buf\n");
2430 chan
->align_buf
= 0;
2434 chan
->align_buf
= qh
->dw_align_buf_dma
;
2438 #define DWC2_USB_DMA_ALIGN 4
2440 static void dwc2_free_dma_aligned_buffer(struct urb
*urb
)
2442 void *stored_xfer_buffer
;
2445 if (!(urb
->transfer_flags
& URB_ALIGNED_TEMP_BUFFER
))
2448 /* Restore urb->transfer_buffer from the end of the allocated area */
2449 memcpy(&stored_xfer_buffer
,
2450 PTR_ALIGN(urb
->transfer_buffer
+ urb
->transfer_buffer_length
,
2451 dma_get_cache_alignment()),
2452 sizeof(urb
->transfer_buffer
));
2454 if (usb_urb_dir_in(urb
)) {
2455 if (usb_pipeisoc(urb
->pipe
))
2456 length
= urb
->transfer_buffer_length
;
2458 length
= urb
->actual_length
;
2460 memcpy(stored_xfer_buffer
, urb
->transfer_buffer
, length
);
2462 kfree(urb
->transfer_buffer
);
2463 urb
->transfer_buffer
= stored_xfer_buffer
;
2465 urb
->transfer_flags
&= ~URB_ALIGNED_TEMP_BUFFER
;
2468 static int dwc2_alloc_dma_aligned_buffer(struct urb
*urb
, gfp_t mem_flags
)
2471 size_t kmalloc_size
;
2473 if (urb
->num_sgs
|| urb
->sg
||
2474 urb
->transfer_buffer_length
== 0 ||
2475 !((uintptr_t)urb
->transfer_buffer
& (DWC2_USB_DMA_ALIGN
- 1)))
2479 * Allocate a buffer with enough padding for original transfer_buffer
2480 * pointer. This allocation is guaranteed to be aligned properly for
2483 kmalloc_size
= urb
->transfer_buffer_length
+
2484 (dma_get_cache_alignment() - 1) +
2485 sizeof(urb
->transfer_buffer
);
2487 kmalloc_ptr
= kmalloc(kmalloc_size
, mem_flags
);
2492 * Position value of original urb->transfer_buffer pointer to the end
2493 * of allocation for later referencing
2495 memcpy(PTR_ALIGN(kmalloc_ptr
+ urb
->transfer_buffer_length
,
2496 dma_get_cache_alignment()),
2497 &urb
->transfer_buffer
, sizeof(urb
->transfer_buffer
));
2499 if (usb_urb_dir_out(urb
))
2500 memcpy(kmalloc_ptr
, urb
->transfer_buffer
,
2501 urb
->transfer_buffer_length
);
2502 urb
->transfer_buffer
= kmalloc_ptr
;
2504 urb
->transfer_flags
|= URB_ALIGNED_TEMP_BUFFER
;
2509 static int dwc2_map_urb_for_dma(struct usb_hcd
*hcd
, struct urb
*urb
,
2514 /* We assume setup_dma is always aligned; warn if not */
2515 WARN_ON_ONCE(urb
->setup_dma
&&
2516 (urb
->setup_dma
& (DWC2_USB_DMA_ALIGN
- 1)));
2518 ret
= dwc2_alloc_dma_aligned_buffer(urb
, mem_flags
);
2522 ret
= usb_hcd_map_urb_for_dma(hcd
, urb
, mem_flags
);
2524 dwc2_free_dma_aligned_buffer(urb
);
/*
 * hc_driver unmap_urb_for_dma hook: generic HCD unmap followed by release
 * of any temporary aligned bounce buffer.
 */
static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	usb_hcd_unmap_urb_for_dma(hcd, urb);
	dwc2_free_dma_aligned_buffer(urb);
}
2536 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
2537 * channel and initializes the host channel to perform the transactions. The
2538 * host channel is removed from the free list.
2540 * @hsotg: The HCD state structure
2541 * @qh: Transactions from the first QTD for this QH are selected and assigned
2542 * to a free host channel
2544 static int dwc2_assign_and_init_hc(struct dwc2_hsotg
*hsotg
, struct dwc2_qh
*qh
)
2546 struct dwc2_host_chan
*chan
;
2547 struct dwc2_hcd_urb
*urb
;
2548 struct dwc2_qtd
*qtd
;
2551 dev_vdbg(hsotg
->dev
, "%s(%p,%p)\n", __func__
, hsotg
, qh
);
2553 if (list_empty(&qh
->qtd_list
)) {
2554 dev_dbg(hsotg
->dev
, "No QTDs in QH list\n");
2558 if (list_empty(&hsotg
->free_hc_list
)) {
2559 dev_dbg(hsotg
->dev
, "No free channel to assign\n");
2563 chan
= list_first_entry(&hsotg
->free_hc_list
, struct dwc2_host_chan
,
2566 /* Remove host channel from free list */
2567 list_del_init(&chan
->hc_list_entry
);
2569 qtd
= list_first_entry(&qh
->qtd_list
, struct dwc2_qtd
, qtd_list_entry
);
2572 qtd
->in_process
= 1;
2575 * Use usb_pipedevice to determine device address. This address is
2576 * 0 before the SET_ADDRESS command and the correct address afterward.
2578 chan
->dev_addr
= dwc2_hcd_get_dev_addr(&urb
->pipe_info
);
2579 chan
->ep_num
= dwc2_hcd_get_ep_num(&urb
->pipe_info
);
2580 chan
->speed
= qh
->dev_speed
;
2581 chan
->max_packet
= qh
->maxp
;
2583 chan
->xfer_started
= 0;
2584 chan
->halt_status
= DWC2_HC_XFER_NO_HALT_STATUS
;
2585 chan
->error_state
= (qtd
->error_count
> 0);
2586 chan
->halt_on_queue
= 0;
2587 chan
->halt_pending
= 0;
2591 * The following values may be modified in the transfer type section
2592 * below. The xfer_len value may be reduced when the transfer is
2593 * started to accommodate the max widths of the XferSize and PktCnt
2594 * fields in the HCTSIZn register.
2597 chan
->ep_is_in
= (dwc2_hcd_is_pipe_in(&urb
->pipe_info
) != 0);
2601 chan
->do_ping
= qh
->ping_state
;
2603 chan
->data_pid_start
= qh
->data_toggle
;
2604 chan
->multi_count
= 1;
2606 if (urb
->actual_length
> urb
->length
&&
2607 !dwc2_hcd_is_pipe_in(&urb
->pipe_info
))
2608 urb
->actual_length
= urb
->length
;
2610 if (hsotg
->params
.host_dma
)
2611 chan
->xfer_dma
= urb
->dma
+ urb
->actual_length
;
2613 chan
->xfer_buf
= (u8
*)urb
->buf
+ urb
->actual_length
;
2615 chan
->xfer_len
= urb
->length
- urb
->actual_length
;
2616 chan
->xfer_count
= 0;
2618 /* Set the split attributes if required */
2620 dwc2_hc_init_split(hsotg
, chan
, qtd
, urb
);
2624 /* Set the transfer attributes */
2625 dwc2_hc_init_xfer(hsotg
, chan
, qtd
);
2627 /* For non-dword aligned buffers */
2628 if (hsotg
->params
.host_dma
&& qh
->do_split
&&
2629 chan
->ep_is_in
&& (chan
->xfer_dma
& 0x3)) {
2630 dev_vdbg(hsotg
->dev
, "Non-aligned buffer\n");
2631 if (dwc2_alloc_split_dma_aligned_buf(hsotg
, qh
, chan
)) {
2633 "Failed to allocate memory to handle non-aligned buffer\n");
2634 /* Add channel back to free list */
2635 chan
->align_buf
= 0;
2636 chan
->multi_count
= 0;
2637 list_add_tail(&chan
->hc_list_entry
,
2638 &hsotg
->free_hc_list
);
2639 qtd
->in_process
= 0;
2645 * We assume that DMA is always aligned in non-split
2646 * case or split out case. Warn if not.
2648 WARN_ON_ONCE(hsotg
->params
.host_dma
&&
2649 (chan
->xfer_dma
& 0x3));
2650 chan
->align_buf
= 0;
2653 if (chan
->ep_type
== USB_ENDPOINT_XFER_INT
||
2654 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
)
2656 * This value may be modified when the transfer is started
2657 * to reflect the actual transfer length
2659 chan
->multi_count
= qh
->maxp_mult
;
2661 if (hsotg
->params
.dma_desc_enable
) {
2662 chan
->desc_list_addr
= qh
->desc_list_dma
;
2663 chan
->desc_list_sz
= qh
->desc_list_sz
;
2666 dwc2_hc_init(hsotg
, chan
);
2673 * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
2674 * schedule and assigns them to available host channels. Called from the HCD
2675 * interrupt handler functions.
2677 * @hsotg: The HCD state structure
2679 * Return: The types of new transactions that were assigned to host channels
2681 enum dwc2_transaction_type
dwc2_hcd_select_transactions(
2682 struct dwc2_hsotg
*hsotg
)
2684 enum dwc2_transaction_type ret_val
= DWC2_TRANSACTION_NONE
;
2685 struct list_head
*qh_ptr
;
2689 #ifdef DWC2_DEBUG_SOF
2690 dev_vdbg(hsotg
->dev
, " Select Transactions\n");
2693 /* Process entries in the periodic ready list */
2694 qh_ptr
= hsotg
->periodic_sched_ready
.next
;
2695 while (qh_ptr
!= &hsotg
->periodic_sched_ready
) {
2696 if (list_empty(&hsotg
->free_hc_list
))
2698 if (hsotg
->params
.uframe_sched
) {
2699 if (hsotg
->available_host_channels
<= 1)
2701 hsotg
->available_host_channels
--;
2703 qh
= list_entry(qh_ptr
, struct dwc2_qh
, qh_list_entry
);
2704 if (dwc2_assign_and_init_hc(hsotg
, qh
)) {
2705 if (hsotg
->params
.uframe_sched
)
2706 hsotg
->available_host_channels
++;
2711 * Move the QH from the periodic ready schedule to the
2712 * periodic assigned schedule
2714 qh_ptr
= qh_ptr
->next
;
2715 list_move_tail(&qh
->qh_list_entry
,
2716 &hsotg
->periodic_sched_assigned
);
2717 ret_val
= DWC2_TRANSACTION_PERIODIC
;
2721 * Process entries in the inactive portion of the non-periodic
2722 * schedule. Some free host channels may not be used if they are
2723 * reserved for periodic transfers.
2725 num_channels
= hsotg
->params
.host_channels
;
2726 qh_ptr
= hsotg
->non_periodic_sched_inactive
.next
;
2727 while (qh_ptr
!= &hsotg
->non_periodic_sched_inactive
) {
2728 if (!hsotg
->params
.uframe_sched
&&
2729 hsotg
->non_periodic_channels
>= num_channels
-
2730 hsotg
->periodic_channels
)
2732 if (list_empty(&hsotg
->free_hc_list
))
2734 qh
= list_entry(qh_ptr
, struct dwc2_qh
, qh_list_entry
);
2735 if (hsotg
->params
.uframe_sched
) {
2736 if (hsotg
->available_host_channels
< 1)
2738 hsotg
->available_host_channels
--;
2741 if (dwc2_assign_and_init_hc(hsotg
, qh
)) {
2742 if (hsotg
->params
.uframe_sched
)
2743 hsotg
->available_host_channels
++;
2748 * Move the QH from the non-periodic inactive schedule to the
2749 * non-periodic active schedule
2751 qh_ptr
= qh_ptr
->next
;
2752 list_move_tail(&qh
->qh_list_entry
,
2753 &hsotg
->non_periodic_sched_active
);
2755 if (ret_val
== DWC2_TRANSACTION_NONE
)
2756 ret_val
= DWC2_TRANSACTION_NON_PERIODIC
;
2758 ret_val
= DWC2_TRANSACTION_ALL
;
2760 if (!hsotg
->params
.uframe_sched
)
2761 hsotg
->non_periodic_channels
++;
2768 * dwc2_queue_transaction() - Attempts to queue a single transaction request for
2769 * a host channel associated with either a periodic or non-periodic transfer
2771 * @hsotg: The HCD state structure
2772 * @chan: Host channel descriptor associated with either a periodic or
2773 * non-periodic transfer
2774 * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
2775 * for periodic transfers or the non-periodic Tx FIFO
2776 * for non-periodic transfers
2778 * Return: 1 if a request is queued and more requests may be needed to
2779 * complete the transfer, 0 if no more requests are required for this
2780 * transfer, -1 if there is insufficient space in the Tx FIFO
2782 * This function assumes that there is space available in the appropriate
2783 * request queue. For an OUT transfer or SETUP transaction in Slave mode,
2784 * it checks whether space is available in the appropriate Tx FIFO.
2786 * Must be called with interrupt disabled and spinlock held
2788 static int dwc2_queue_transaction(struct dwc2_hsotg
*hsotg
,
2789 struct dwc2_host_chan
*chan
,
2790 u16 fifo_dwords_avail
)
2795 /* Put ourselves on the list to keep order straight */
2796 list_move_tail(&chan
->split_order_list_entry
,
2797 &hsotg
->split_order
);
2799 if (hsotg
->params
.host_dma
&& chan
->qh
) {
2800 if (hsotg
->params
.dma_desc_enable
) {
2801 if (!chan
->xfer_started
||
2802 chan
->ep_type
== USB_ENDPOINT_XFER_ISOC
) {
2803 dwc2_hcd_start_xfer_ddma(hsotg
, chan
->qh
);
2804 chan
->qh
->ping_state
= 0;
2806 } else if (!chan
->xfer_started
) {
2807 dwc2_hc_start_transfer(hsotg
, chan
);
2808 chan
->qh
->ping_state
= 0;
2810 } else if (chan
->halt_pending
) {
2811 /* Don't queue a request if the channel has been halted */
2812 } else if (chan
->halt_on_queue
) {
2813 dwc2_hc_halt(hsotg
, chan
, chan
->halt_status
);
2814 } else if (chan
->do_ping
) {
2815 if (!chan
->xfer_started
)
2816 dwc2_hc_start_transfer(hsotg
, chan
);
2817 } else if (!chan
->ep_is_in
||
2818 chan
->data_pid_start
== DWC2_HC_PID_SETUP
) {
2819 if ((fifo_dwords_avail
* 4) >= chan
->max_packet
) {
2820 if (!chan
->xfer_started
) {
2821 dwc2_hc_start_transfer(hsotg
, chan
);
2824 retval
= dwc2_hc_continue_transfer(hsotg
, chan
);
2830 if (!chan
->xfer_started
) {
2831 dwc2_hc_start_transfer(hsotg
, chan
);
2834 retval
= dwc2_hc_continue_transfer(hsotg
, chan
);
2842 * Processes periodic channels for the next frame and queues transactions for
2843 * these channels to the DWC_otg controller. After queueing transactions, the
2844 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
2845 * to queue as Periodic Tx FIFO or request queue space becomes available.
2846 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
2848 * Must be called with interrupt disabled and spinlock held
2850 static void dwc2_process_periodic_channels(struct dwc2_hsotg
*hsotg
)
2852 struct list_head
*qh_ptr
;
2858 bool no_queue_space
= false;
2859 bool no_fifo_space
= false;
2862 /* If empty list then just adjust interrupt enables */
2863 if (list_empty(&hsotg
->periodic_sched_assigned
))
2867 dev_vdbg(hsotg
->dev
, "Queue periodic transactions\n");
2869 tx_status
= dwc2_readl(hsotg
, HPTXSTS
);
2870 qspcavail
= (tx_status
& TXSTS_QSPCAVAIL_MASK
) >>
2871 TXSTS_QSPCAVAIL_SHIFT
;
2872 fspcavail
= (tx_status
& TXSTS_FSPCAVAIL_MASK
) >>
2873 TXSTS_FSPCAVAIL_SHIFT
;
2876 dev_vdbg(hsotg
->dev
, " P Tx Req Queue Space Avail (before queue): %d\n",
2878 dev_vdbg(hsotg
->dev
, " P Tx FIFO Space Avail (before queue): %d\n",
2882 qh_ptr
= hsotg
->periodic_sched_assigned
.next
;
2883 while (qh_ptr
!= &hsotg
->periodic_sched_assigned
) {
2884 tx_status
= dwc2_readl(hsotg
, HPTXSTS
);
2885 qspcavail
= (tx_status
& TXSTS_QSPCAVAIL_MASK
) >>
2886 TXSTS_QSPCAVAIL_SHIFT
;
2887 if (qspcavail
== 0) {
2888 no_queue_space
= true;
2892 qh
= list_entry(qh_ptr
, struct dwc2_qh
, qh_list_entry
);
2894 qh_ptr
= qh_ptr
->next
;
2898 /* Make sure EP's TT buffer is clean before queueing qtds */
2899 if (qh
->tt_buffer_dirty
) {
2900 qh_ptr
= qh_ptr
->next
;
2905 * Set a flag if we're queuing high-bandwidth in slave mode.
2906 * The flag prevents any halts to get into the request queue in
2907 * the middle of multiple high-bandwidth packets getting queued.
2909 if (!hsotg
->params
.host_dma
&&
2910 qh
->channel
->multi_count
> 1)
2911 hsotg
->queuing_high_bandwidth
= 1;
2913 fspcavail
= (tx_status
& TXSTS_FSPCAVAIL_MASK
) >>
2914 TXSTS_FSPCAVAIL_SHIFT
;
2915 status
= dwc2_queue_transaction(hsotg
, qh
->channel
, fspcavail
);
2917 no_fifo_space
= true;
2922 * In Slave mode, stay on the current transfer until there is
2923 * nothing more to do or the high-bandwidth request count is
2924 * reached. In DMA mode, only need to queue one request. The
2925 * controller automatically handles multiple packets for
2926 * high-bandwidth transfers.
2928 if (hsotg
->params
.host_dma
|| status
== 0 ||
2929 qh
->channel
->requests
== qh
->channel
->multi_count
) {
2930 qh_ptr
= qh_ptr
->next
;
2932 * Move the QH from the periodic assigned schedule to
2933 * the periodic queued schedule
2935 list_move_tail(&qh
->qh_list_entry
,
2936 &hsotg
->periodic_sched_queued
);
2938 /* done queuing high bandwidth */
2939 hsotg
->queuing_high_bandwidth
= 0;
2944 if (no_queue_space
|| no_fifo_space
||
2945 (!hsotg
->params
.host_dma
&&
2946 !list_empty(&hsotg
->periodic_sched_assigned
))) {
2948 * May need to queue more transactions as the request
2949 * queue or Tx FIFO empties. Enable the periodic Tx
2950 * FIFO empty interrupt. (Always use the half-empty
2951 * level to ensure that new requests are loaded as
2952 * soon as possible.)
2954 gintmsk
= dwc2_readl(hsotg
, GINTMSK
);
2955 if (!(gintmsk
& GINTSTS_PTXFEMP
)) {
2956 gintmsk
|= GINTSTS_PTXFEMP
;
2957 dwc2_writel(hsotg
, gintmsk
, GINTMSK
);
2961 * Disable the Tx FIFO empty interrupt since there are
2962 * no more transactions that need to be queued right
2963 * now. This function is called from interrupt
2964 * handlers to queue more transactions as transfer
2967 gintmsk
= dwc2_readl(hsotg
, GINTMSK
);
2968 if (gintmsk
& GINTSTS_PTXFEMP
) {
2969 gintmsk
&= ~GINTSTS_PTXFEMP
;
2970 dwc2_writel(hsotg
, gintmsk
, GINTMSK
);
2976 * Processes active non-periodic channels and queues transactions for these
2977 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
2978 * FIFO Empty interrupt is enabled if there are more transactions to queue as
2979 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
2980 * FIFO Empty interrupt is disabled.
2982 * Must be called with interrupt disabled and spinlock held
2984 static void dwc2_process_non_periodic_channels(struct dwc2_hsotg
*hsotg
)
2986 struct list_head
*orig_qh_ptr
;
2993 int no_queue_space
= 0;
2994 int no_fifo_space
= 0;
2997 dev_vdbg(hsotg
->dev
, "Queue non-periodic transactions\n");
2999 tx_status
= dwc2_readl(hsotg
, GNPTXSTS
);
3000 qspcavail
= (tx_status
& TXSTS_QSPCAVAIL_MASK
) >>
3001 TXSTS_QSPCAVAIL_SHIFT
;
3002 fspcavail
= (tx_status
& TXSTS_FSPCAVAIL_MASK
) >>
3003 TXSTS_FSPCAVAIL_SHIFT
;
3004 dev_vdbg(hsotg
->dev
, " NP Tx Req Queue Space Avail (before queue): %d\n",
3006 dev_vdbg(hsotg
->dev
, " NP Tx FIFO Space Avail (before queue): %d\n",
3010 * Keep track of the starting point. Skip over the start-of-list
3013 if (hsotg
->non_periodic_qh_ptr
== &hsotg
->non_periodic_sched_active
)
3014 hsotg
->non_periodic_qh_ptr
= hsotg
->non_periodic_qh_ptr
->next
;
3015 orig_qh_ptr
= hsotg
->non_periodic_qh_ptr
;
3018 * Process once through the active list or until no more space is
3019 * available in the request queue or the Tx FIFO
3022 tx_status
= dwc2_readl(hsotg
, GNPTXSTS
);
3023 qspcavail
= (tx_status
& TXSTS_QSPCAVAIL_MASK
) >>
3024 TXSTS_QSPCAVAIL_SHIFT
;
3025 if (!hsotg
->params
.host_dma
&& qspcavail
== 0) {
3030 qh
= list_entry(hsotg
->non_periodic_qh_ptr
, struct dwc2_qh
,
3035 /* Make sure EP's TT buffer is clean before queueing qtds */
3036 if (qh
->tt_buffer_dirty
)
3039 fspcavail
= (tx_status
& TXSTS_FSPCAVAIL_MASK
) >>
3040 TXSTS_FSPCAVAIL_SHIFT
;
3041 status
= dwc2_queue_transaction(hsotg
, qh
->channel
, fspcavail
);
3045 } else if (status
< 0) {
3050 /* Advance to next QH, skipping start-of-list entry */
3051 hsotg
->non_periodic_qh_ptr
= hsotg
->non_periodic_qh_ptr
->next
;
3052 if (hsotg
->non_periodic_qh_ptr
==
3053 &hsotg
->non_periodic_sched_active
)
3054 hsotg
->non_periodic_qh_ptr
=
3055 hsotg
->non_periodic_qh_ptr
->next
;
3056 } while (hsotg
->non_periodic_qh_ptr
!= orig_qh_ptr
);
3058 if (!hsotg
->params
.host_dma
) {
3059 tx_status
= dwc2_readl(hsotg
, GNPTXSTS
);
3060 qspcavail
= (tx_status
& TXSTS_QSPCAVAIL_MASK
) >>
3061 TXSTS_QSPCAVAIL_SHIFT
;
3062 fspcavail
= (tx_status
& TXSTS_FSPCAVAIL_MASK
) >>
3063 TXSTS_FSPCAVAIL_SHIFT
;
3064 dev_vdbg(hsotg
->dev
,
3065 " NP Tx Req Queue Space Avail (after queue): %d\n",
3067 dev_vdbg(hsotg
->dev
,
3068 " NP Tx FIFO Space Avail (after queue): %d\n",
3071 if (more_to_do
|| no_queue_space
|| no_fifo_space
) {
3073 * May need to queue more transactions as the request
3074 * queue or Tx FIFO empties. Enable the non-periodic
3075 * Tx FIFO empty interrupt. (Always use the half-empty
3076 * level to ensure that new requests are loaded as
3077 * soon as possible.)
3079 gintmsk
= dwc2_readl(hsotg
, GINTMSK
);
3080 gintmsk
|= GINTSTS_NPTXFEMP
;
3081 dwc2_writel(hsotg
, gintmsk
, GINTMSK
);
3084 * Disable the Tx FIFO empty interrupt since there are
3085 * no more transactions that need to be queued right
3086 * now. This function is called from interrupt
3087 * handlers to queue more transactions as transfer
3090 gintmsk
= dwc2_readl(hsotg
, GINTMSK
);
3091 gintmsk
&= ~GINTSTS_NPTXFEMP
;
3092 dwc2_writel(hsotg
, gintmsk
, GINTMSK
);
3098 * dwc2_hcd_queue_transactions() - Processes the currently active host channels
3099 * and queues transactions for these channels to the DWC_otg controller. Called
3100 * from the HCD interrupt handler functions.
3102 * @hsotg: The HCD state structure
3103 * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
3106 * Must be called with interrupt disabled and spinlock held
3108 void dwc2_hcd_queue_transactions(struct dwc2_hsotg
*hsotg
,
3109 enum dwc2_transaction_type tr_type
)
3111 #ifdef DWC2_DEBUG_SOF
3112 dev_vdbg(hsotg
->dev
, "Queue Transactions\n");
3114 /* Process host channels associated with periodic transfers */
3115 if (tr_type
== DWC2_TRANSACTION_PERIODIC
||
3116 tr_type
== DWC2_TRANSACTION_ALL
)
3117 dwc2_process_periodic_channels(hsotg
);
3119 /* Process host channels associated with non-periodic transfers */
3120 if (tr_type
== DWC2_TRANSACTION_NON_PERIODIC
||
3121 tr_type
== DWC2_TRANSACTION_ALL
) {
3122 if (!list_empty(&hsotg
->non_periodic_sched_active
)) {
3123 dwc2_process_non_periodic_channels(hsotg
);
3126 * Ensure NP Tx FIFO empty interrupt is disabled when
3127 * there are no non-periodic transfers to process
3129 u32 gintmsk
= dwc2_readl(hsotg
, GINTMSK
);
3131 gintmsk
&= ~GINTSTS_NPTXFEMP
;
3132 dwc2_writel(hsotg
, gintmsk
, GINTMSK
);
3137 static void dwc2_conn_id_status_change(struct work_struct
*work
)
3139 struct dwc2_hsotg
*hsotg
= container_of(work
, struct dwc2_hsotg
,
3143 unsigned long flags
;
3145 dev_dbg(hsotg
->dev
, "%s()\n", __func__
);
3147 gotgctl
= dwc2_readl(hsotg
, GOTGCTL
);
3148 dev_dbg(hsotg
->dev
, "gotgctl=%0x\n", gotgctl
);
3149 dev_dbg(hsotg
->dev
, "gotgctl.b.conidsts=%d\n",
3150 !!(gotgctl
& GOTGCTL_CONID_B
));
3152 /* B-Device connector (Device Mode) */
3153 if (gotgctl
& GOTGCTL_CONID_B
) {
3154 dwc2_vbus_supply_exit(hsotg
);
3155 /* Wait for switch to device mode */
3156 dev_dbg(hsotg
->dev
, "connId B\n");
3157 if (hsotg
->bus_suspended
) {
3158 dev_info(hsotg
->dev
,
3159 "Do port resume before switching to device mode\n");
3160 dwc2_port_resume(hsotg
);
3162 while (!dwc2_is_device_mode(hsotg
)) {
3163 dev_info(hsotg
->dev
,
3164 "Waiting for Peripheral Mode, Mode=%s\n",
3165 dwc2_is_host_mode(hsotg
) ? "Host" :
3169 * Sometimes the initial GOTGCTRL read is wrong, so
3170 * check it again and jump to host mode if that was
3173 gotgctl
= dwc2_readl(hsotg
, GOTGCTL
);
3174 if (!(gotgctl
& GOTGCTL_CONID_B
))
3181 "Connection id status change timed out\n");
3184 * Exit Partial Power Down without restoring registers.
3185 * No need to check the return value as registers
3186 * are not being restored.
3188 if (hsotg
->in_ppd
&& hsotg
->lx_state
== DWC2_L2
)
3189 dwc2_exit_partial_power_down(hsotg
, 0, false);
3191 hsotg
->op_state
= OTG_STATE_B_PERIPHERAL
;
3192 dwc2_core_init(hsotg
, false);
3193 dwc2_enable_global_interrupts(hsotg
);
3194 spin_lock_irqsave(&hsotg
->lock
, flags
);
3195 dwc2_hsotg_core_init_disconnected(hsotg
, false);
3196 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
3197 /* Enable ACG feature in device mode,if supported */
3198 dwc2_enable_acg(hsotg
);
3199 dwc2_hsotg_core_connect(hsotg
);
3202 /* A-Device connector (Host Mode) */
3203 dev_dbg(hsotg
->dev
, "connId A\n");
3204 while (!dwc2_is_host_mode(hsotg
)) {
3205 dev_info(hsotg
->dev
, "Waiting for Host Mode, Mode=%s\n",
3206 dwc2_is_host_mode(hsotg
) ?
3207 "Host" : "Peripheral");
3214 "Connection id status change timed out\n");
3216 spin_lock_irqsave(&hsotg
->lock
, flags
);
3217 dwc2_hsotg_disconnect(hsotg
);
3218 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
3220 hsotg
->op_state
= OTG_STATE_A_HOST
;
3221 /* Initialize the Core for Host mode */
3222 dwc2_core_init(hsotg
, false);
3223 dwc2_enable_global_interrupts(hsotg
);
3224 dwc2_hcd_start(hsotg
);
3228 static void dwc2_wakeup_detected(struct timer_list
*t
)
3230 struct dwc2_hsotg
*hsotg
= from_timer(hsotg
, t
, wkp_timer
);
3233 dev_dbg(hsotg
->dev
, "%s()\n", __func__
);
3236 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
3237 * so that OPT tests pass with all PHYs.)
3239 hprt0
= dwc2_read_hprt0(hsotg
);
3240 dev_dbg(hsotg
->dev
, "Resume: HPRT0=%0x\n", hprt0
);
3241 hprt0
&= ~HPRT0_RES
;
3242 dwc2_writel(hsotg
, hprt0
, HPRT0
);
3243 dev_dbg(hsotg
->dev
, "Clear Resume: HPRT0=%0x\n",
3244 dwc2_readl(hsotg
, HPRT0
));
3246 dwc2_hcd_rem_wakeup(hsotg
);
3247 hsotg
->bus_suspended
= false;
3249 /* Change to L0 state */
3250 hsotg
->lx_state
= DWC2_L0
;
3253 static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg
*hsotg
)
3255 struct usb_hcd
*hcd
= dwc2_hsotg_to_hcd(hsotg
);
3257 return hcd
->self
.b_hnp_enable
;
3261 * dwc2_port_suspend() - Put controller in suspend mode for host.
3263 * @hsotg: Programming view of the DWC_otg controller
3264 * @windex: The control request wIndex field
3266 * Return: non-zero if failed to enter suspend mode for host.
3268 * This function is for entering Host mode suspend.
3269 * Must NOT be called with interrupt disabled or spinlock held.
3271 int dwc2_port_suspend(struct dwc2_hsotg
*hsotg
, u16 windex
)
3273 unsigned long flags
;
3278 dev_dbg(hsotg
->dev
, "%s()\n", __func__
);
3280 spin_lock_irqsave(&hsotg
->lock
, flags
);
3282 if (windex
== hsotg
->otg_port
&& dwc2_host_is_b_hnp_enabled(hsotg
)) {
3283 gotgctl
= dwc2_readl(hsotg
, GOTGCTL
);
3284 gotgctl
|= GOTGCTL_HSTSETHNPEN
;
3285 dwc2_writel(hsotg
, gotgctl
, GOTGCTL
);
3286 hsotg
->op_state
= OTG_STATE_A_SUSPEND
;
3289 switch (hsotg
->params
.power_down
) {
3290 case DWC2_POWER_DOWN_PARAM_PARTIAL
:
3291 ret
= dwc2_enter_partial_power_down(hsotg
);
3294 "enter partial_power_down failed.\n");
3296 case DWC2_POWER_DOWN_PARAM_HIBERNATION
:
3298 * Perform spin unlock and lock because in
3299 * "dwc2_host_enter_hibernation()" function there is a spinlock
3300 * logic which prevents servicing of any IRQ during entering
3303 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
3304 ret
= dwc2_enter_hibernation(hsotg
, 1);
3306 dev_err(hsotg
->dev
, "enter hibernation failed.\n");
3307 spin_lock_irqsave(&hsotg
->lock
, flags
);
3309 case DWC2_POWER_DOWN_PARAM_NONE
:
3311 * If not hibernation nor partial power down are supported,
3312 * clock gating is used to save power.
3314 if (!hsotg
->params
.no_clock_gating
)
3315 dwc2_host_enter_clock_gating(hsotg
);
3319 /* For HNP the bus must be suspended for at least 200ms */
3320 if (dwc2_host_is_b_hnp_enabled(hsotg
)) {
3321 pcgctl
= dwc2_readl(hsotg
, PCGCTL
);
3322 pcgctl
&= ~PCGCTL_STOPPCLK
;
3323 dwc2_writel(hsotg
, pcgctl
, PCGCTL
);
3325 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
3329 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
3336 * dwc2_port_resume() - Exit controller from suspend mode for host.
3338 * @hsotg: Programming view of the DWC_otg controller
3340 * Return: non-zero if failed to exit suspend mode for host.
3342 * This function is for exiting Host mode suspend.
3343 * Must NOT be called with interrupt disabled or spinlock held.
3345 int dwc2_port_resume(struct dwc2_hsotg
*hsotg
)
3347 unsigned long flags
;
3350 spin_lock_irqsave(&hsotg
->lock
, flags
);
3352 switch (hsotg
->params
.power_down
) {
3353 case DWC2_POWER_DOWN_PARAM_PARTIAL
:
3354 ret
= dwc2_exit_partial_power_down(hsotg
, 0, true);
3357 "exit partial_power_down failed.\n");
3359 case DWC2_POWER_DOWN_PARAM_HIBERNATION
:
3360 /* Exit host hibernation. */
3361 ret
= dwc2_exit_hibernation(hsotg
, 0, 0, 1);
3363 dev_err(hsotg
->dev
, "exit hibernation failed.\n");
3365 case DWC2_POWER_DOWN_PARAM_NONE
:
3367 * If not hibernation nor partial power down are supported,
3368 * port resume is done using the clock gating programming flow.
3370 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
3371 dwc2_host_exit_clock_gating(hsotg
, 0);
3372 spin_lock_irqsave(&hsotg
->lock
, flags
);
3376 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
3381 /* Handles hub class-specific requests */
3382 static int dwc2_hcd_hub_control(struct dwc2_hsotg
*hsotg
, u16 typereq
,
3383 u16 wvalue
, u16 windex
, char *buf
, u16 wlength
)
3385 struct usb_hub_descriptor
*hub_desc
;
3394 case ClearHubFeature
:
3395 dev_dbg(hsotg
->dev
, "ClearHubFeature %1xh\n", wvalue
);
3398 case C_HUB_LOCAL_POWER
:
3399 case C_HUB_OVER_CURRENT
:
3400 /* Nothing required here */
3406 "ClearHubFeature request %1xh unknown\n",
3411 case ClearPortFeature
:
3412 if (wvalue
!= USB_PORT_FEAT_L1
)
3413 if (!windex
|| windex
> 1)
3416 case USB_PORT_FEAT_ENABLE
:
3418 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
3419 hprt0
= dwc2_read_hprt0(hsotg
);
3421 dwc2_writel(hsotg
, hprt0
, HPRT0
);
3424 case USB_PORT_FEAT_SUSPEND
:
3426 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
3428 if (hsotg
->bus_suspended
)
3429 retval
= dwc2_port_resume(hsotg
);
3432 case USB_PORT_FEAT_POWER
:
3434 "ClearPortFeature USB_PORT_FEAT_POWER\n");
3435 hprt0
= dwc2_read_hprt0(hsotg
);
3436 pwr
= hprt0
& HPRT0_PWR
;
3437 hprt0
&= ~HPRT0_PWR
;
3438 dwc2_writel(hsotg
, hprt0
, HPRT0
);
3440 dwc2_vbus_supply_exit(hsotg
);
3443 case USB_PORT_FEAT_INDICATOR
:
3445 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
3446 /* Port indicator not supported */
3449 case USB_PORT_FEAT_C_CONNECTION
:
3451 * Clears driver's internal Connect Status Change flag
3454 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
3455 hsotg
->flags
.b
.port_connect_status_change
= 0;
3458 case USB_PORT_FEAT_C_RESET
:
3459 /* Clears driver's internal Port Reset Change flag */
3461 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
3462 hsotg
->flags
.b
.port_reset_change
= 0;
3465 case USB_PORT_FEAT_C_ENABLE
:
3467 * Clears the driver's internal Port Enable/Disable
3471 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
3472 hsotg
->flags
.b
.port_enable_change
= 0;
3475 case USB_PORT_FEAT_C_SUSPEND
:
3477 * Clears the driver's internal Port Suspend Change
3478 * flag, which is set when resume signaling on the host
3482 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
3483 hsotg
->flags
.b
.port_suspend_change
= 0;
3486 case USB_PORT_FEAT_C_PORT_L1
:
3488 "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
3489 hsotg
->flags
.b
.port_l1_change
= 0;
3492 case USB_PORT_FEAT_C_OVER_CURRENT
:
3494 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
3495 hsotg
->flags
.b
.port_over_current_change
= 0;
3501 "ClearPortFeature request %1xh unknown or unsupported\n",
3506 case GetHubDescriptor
:
3507 dev_dbg(hsotg
->dev
, "GetHubDescriptor\n");
3508 hub_desc
= (struct usb_hub_descriptor
*)buf
;
3509 hub_desc
->bDescLength
= 9;
3510 hub_desc
->bDescriptorType
= USB_DT_HUB
;
3511 hub_desc
->bNbrPorts
= 1;
3512 hub_desc
->wHubCharacteristics
=
3513 cpu_to_le16(HUB_CHAR_COMMON_LPSM
|
3514 HUB_CHAR_INDV_PORT_OCPM
);
3515 hub_desc
->bPwrOn2PwrGood
= 1;
3516 hub_desc
->bHubContrCurrent
= 0;
3517 hub_desc
->u
.hs
.DeviceRemovable
[0] = 0;
3518 hub_desc
->u
.hs
.DeviceRemovable
[1] = 0xff;
3522 dev_dbg(hsotg
->dev
, "GetHubStatus\n");
3527 dev_vdbg(hsotg
->dev
,
3528 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex
,
3530 if (!windex
|| windex
> 1)
3534 if (hsotg
->flags
.b
.port_connect_status_change
)
3535 port_status
|= USB_PORT_STAT_C_CONNECTION
<< 16;
3536 if (hsotg
->flags
.b
.port_enable_change
)
3537 port_status
|= USB_PORT_STAT_C_ENABLE
<< 16;
3538 if (hsotg
->flags
.b
.port_suspend_change
)
3539 port_status
|= USB_PORT_STAT_C_SUSPEND
<< 16;
3540 if (hsotg
->flags
.b
.port_l1_change
)
3541 port_status
|= USB_PORT_STAT_C_L1
<< 16;
3542 if (hsotg
->flags
.b
.port_reset_change
)
3543 port_status
|= USB_PORT_STAT_C_RESET
<< 16;
3544 if (hsotg
->flags
.b
.port_over_current_change
) {
3545 dev_warn(hsotg
->dev
, "Overcurrent change detected\n");
3546 port_status
|= USB_PORT_STAT_C_OVERCURRENT
<< 16;
3549 if (!hsotg
->flags
.b
.port_connect_status
) {
3551 * The port is disconnected, which means the core is
3552 * either in device mode or it soon will be. Just
3553 * return 0's for the remainder of the port status
3554 * since the port register can't be read if the core
3555 * is in device mode.
3557 *(__le32
*)buf
= cpu_to_le32(port_status
);
3561 hprt0
= dwc2_readl(hsotg
, HPRT0
);
3562 dev_vdbg(hsotg
->dev
, " HPRT0: 0x%08x\n", hprt0
);
3564 if (hprt0
& HPRT0_CONNSTS
)
3565 port_status
|= USB_PORT_STAT_CONNECTION
;
3566 if (hprt0
& HPRT0_ENA
)
3567 port_status
|= USB_PORT_STAT_ENABLE
;
3568 if (hprt0
& HPRT0_SUSP
)
3569 port_status
|= USB_PORT_STAT_SUSPEND
;
3570 if (hprt0
& HPRT0_OVRCURRACT
)
3571 port_status
|= USB_PORT_STAT_OVERCURRENT
;
3572 if (hprt0
& HPRT0_RST
)
3573 port_status
|= USB_PORT_STAT_RESET
;
3574 if (hprt0
& HPRT0_PWR
)
3575 port_status
|= USB_PORT_STAT_POWER
;
3577 speed
= (hprt0
& HPRT0_SPD_MASK
) >> HPRT0_SPD_SHIFT
;
3578 if (speed
== HPRT0_SPD_HIGH_SPEED
)
3579 port_status
|= USB_PORT_STAT_HIGH_SPEED
;
3580 else if (speed
== HPRT0_SPD_LOW_SPEED
)
3581 port_status
|= USB_PORT_STAT_LOW_SPEED
;
3583 if (hprt0
& HPRT0_TSTCTL_MASK
)
3584 port_status
|= USB_PORT_STAT_TEST
;
3585 /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
3587 if (hsotg
->params
.dma_desc_fs_enable
) {
3589 * Enable descriptor DMA only if a full speed
3590 * device is connected.
3592 if (hsotg
->new_connection
&&
3594 (USB_PORT_STAT_CONNECTION
|
3595 USB_PORT_STAT_HIGH_SPEED
|
3596 USB_PORT_STAT_LOW_SPEED
)) ==
3597 USB_PORT_STAT_CONNECTION
)) {
3600 dev_info(hsotg
->dev
, "Enabling descriptor DMA mode\n");
3601 hsotg
->params
.dma_desc_enable
= true;
3602 hcfg
= dwc2_readl(hsotg
, HCFG
);
3603 hcfg
|= HCFG_DESCDMA
;
3604 dwc2_writel(hsotg
, hcfg
, HCFG
);
3605 hsotg
->new_connection
= false;
3609 dev_vdbg(hsotg
->dev
, "port_status=%08x\n", port_status
);
3610 *(__le32
*)buf
= cpu_to_le32(port_status
);
3614 dev_dbg(hsotg
->dev
, "SetHubFeature\n");
3615 /* No HUB features supported */
3618 case SetPortFeature
:
3619 dev_dbg(hsotg
->dev
, "SetPortFeature\n");
3620 if (wvalue
!= USB_PORT_FEAT_TEST
&& (!windex
|| windex
> 1))
3623 if (!hsotg
->flags
.b
.port_connect_status
) {
3625 * The port is disconnected, which means the core is
3626 * either in device mode or it soon will be. Just
3627 * return without doing anything since the port
3628 * register can't be written if the core is in device
3635 case USB_PORT_FEAT_SUSPEND
:
3637 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
3638 if (windex
!= hsotg
->otg_port
)
3640 if (!hsotg
->bus_suspended
)
3641 retval
= dwc2_port_suspend(hsotg
, windex
);
3644 case USB_PORT_FEAT_POWER
:
3646 "SetPortFeature - USB_PORT_FEAT_POWER\n");
3647 hprt0
= dwc2_read_hprt0(hsotg
);
3648 pwr
= hprt0
& HPRT0_PWR
;
3650 dwc2_writel(hsotg
, hprt0
, HPRT0
);
3652 dwc2_vbus_supply_init(hsotg
);
3655 case USB_PORT_FEAT_RESET
:
3657 "SetPortFeature - USB_PORT_FEAT_RESET\n");
3659 hprt0
= dwc2_read_hprt0(hsotg
);
3661 if (hsotg
->hibernated
) {
3662 retval
= dwc2_exit_hibernation(hsotg
, 0, 1, 1);
3665 "exit hibernation failed\n");
3668 if (hsotg
->in_ppd
) {
3669 retval
= dwc2_exit_partial_power_down(hsotg
, 1,
3673 "exit partial_power_down failed\n");
3676 if (hsotg
->params
.power_down
==
3677 DWC2_POWER_DOWN_PARAM_NONE
&& hsotg
->bus_suspended
)
3678 dwc2_host_exit_clock_gating(hsotg
, 0);
3680 pcgctl
= dwc2_readl(hsotg
, PCGCTL
);
3681 pcgctl
&= ~(PCGCTL_ENBL_SLEEP_GATING
| PCGCTL_STOPPCLK
);
3682 dwc2_writel(hsotg
, pcgctl
, PCGCTL
);
3683 /* ??? Original driver does this */
3684 dwc2_writel(hsotg
, 0, PCGCTL
);
3686 hprt0
= dwc2_read_hprt0(hsotg
);
3687 pwr
= hprt0
& HPRT0_PWR
;
3688 /* Clear suspend bit if resetting from suspend state */
3689 hprt0
&= ~HPRT0_SUSP
;
3692 * When B-Host the Port reset bit is set in the Start
3693 * HCD Callback function, so that the reset is started
3694 * within 1ms of the HNP success interrupt
3696 if (!dwc2_hcd_is_b_host(hsotg
)) {
3697 hprt0
|= HPRT0_PWR
| HPRT0_RST
;
3699 "In host mode, hprt0=%08x\n", hprt0
);
3700 dwc2_writel(hsotg
, hprt0
, HPRT0
);
3702 dwc2_vbus_supply_init(hsotg
);
3705 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
3707 hprt0
&= ~HPRT0_RST
;
3708 dwc2_writel(hsotg
, hprt0
, HPRT0
);
3709 hsotg
->lx_state
= DWC2_L0
; /* Now back to On state */
3712 case USB_PORT_FEAT_INDICATOR
:
3714 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
3718 case USB_PORT_FEAT_TEST
:
3719 hprt0
= dwc2_read_hprt0(hsotg
);
3721 "SetPortFeature - USB_PORT_FEAT_TEST\n");
3722 hprt0
&= ~HPRT0_TSTCTL_MASK
;
3723 hprt0
|= (windex
>> 8) << HPRT0_TSTCTL_SHIFT
;
3724 dwc2_writel(hsotg
, hprt0
, HPRT0
);
3730 "SetPortFeature %1xh unknown or unsupported\n",
3740 "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
3741 typereq
, windex
, wvalue
);
3748 static int dwc2_hcd_is_status_changed(struct dwc2_hsotg
*hsotg
, int port
)
3755 retval
= (hsotg
->flags
.b
.port_connect_status_change
||
3756 hsotg
->flags
.b
.port_reset_change
||
3757 hsotg
->flags
.b
.port_enable_change
||
3758 hsotg
->flags
.b
.port_suspend_change
||
3759 hsotg
->flags
.b
.port_over_current_change
);
3763 "DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
3764 dev_dbg(hsotg
->dev
, " port_connect_status_change: %d\n",
3765 hsotg
->flags
.b
.port_connect_status_change
);
3766 dev_dbg(hsotg
->dev
, " port_reset_change: %d\n",
3767 hsotg
->flags
.b
.port_reset_change
);
3768 dev_dbg(hsotg
->dev
, " port_enable_change: %d\n",
3769 hsotg
->flags
.b
.port_enable_change
);
3770 dev_dbg(hsotg
->dev
, " port_suspend_change: %d\n",
3771 hsotg
->flags
.b
.port_suspend_change
);
3772 dev_dbg(hsotg
->dev
, " port_over_current_change: %d\n",
3773 hsotg
->flags
.b
.port_over_current_change
);
3779 int dwc2_hcd_get_frame_number(struct dwc2_hsotg
*hsotg
)
3781 u32 hfnum
= dwc2_readl(hsotg
, HFNUM
);
3783 #ifdef DWC2_DEBUG_SOF
3784 dev_vdbg(hsotg
->dev
, "DWC OTG HCD GET FRAME NUMBER %d\n",
3785 (hfnum
& HFNUM_FRNUM_MASK
) >> HFNUM_FRNUM_SHIFT
);
3787 return (hfnum
& HFNUM_FRNUM_MASK
) >> HFNUM_FRNUM_SHIFT
;
3790 int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg
*hsotg
, int us
)
3792 u32 hprt
= dwc2_readl(hsotg
, HPRT0
);
3793 u32 hfir
= dwc2_readl(hsotg
, HFIR
);
3794 u32 hfnum
= dwc2_readl(hsotg
, HFNUM
);
3795 unsigned int us_per_frame
;
3796 unsigned int frame_number
;
3797 unsigned int remaining
;
3798 unsigned int interval
;
3799 unsigned int phy_clks
;
3801 /* High speed has 125 us per (micro) frame; others are 1 ms per */
3802 us_per_frame
= (hprt
& HPRT0_SPD_MASK
) ? 1000 : 125;
3804 /* Extract fields */
3805 frame_number
= (hfnum
& HFNUM_FRNUM_MASK
) >> HFNUM_FRNUM_SHIFT
;
3806 remaining
= (hfnum
& HFNUM_FRREM_MASK
) >> HFNUM_FRREM_SHIFT
;
3807 interval
= (hfir
& HFIR_FRINT_MASK
) >> HFIR_FRINT_SHIFT
;
3810 * Number of phy clocks since the last tick of the frame number after
3813 phy_clks
= (interval
- remaining
) +
3814 DIV_ROUND_UP(interval
* us
, us_per_frame
);
3816 return dwc2_frame_num_inc(frame_number
, phy_clks
/ interval
);
3819 int dwc2_hcd_is_b_host(struct dwc2_hsotg
*hsotg
)
3821 return hsotg
->op_state
== OTG_STATE_B_HOST
;
3824 static struct dwc2_hcd_urb
*dwc2_hcd_urb_alloc(struct dwc2_hsotg
*hsotg
,
3828 struct dwc2_hcd_urb
*urb
;
3830 urb
= kzalloc(struct_size(urb
, iso_descs
, iso_desc_count
), mem_flags
);
3832 urb
->packet_count
= iso_desc_count
;
3836 static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg
*hsotg
,
3837 struct dwc2_hcd_urb
*urb
, u8 dev_addr
,
3838 u8 ep_num
, u8 ep_type
, u8 ep_dir
,
3839 u16 maxp
, u16 maxp_mult
)
3842 ep_type
== USB_ENDPOINT_XFER_BULK
||
3843 ep_type
== USB_ENDPOINT_XFER_CONTROL
)
3844 dev_vdbg(hsotg
->dev
,
3845 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, maxp=%d (%d mult)\n",
3846 dev_addr
, ep_num
, ep_dir
, ep_type
, maxp
, maxp_mult
);
3847 urb
->pipe_info
.dev_addr
= dev_addr
;
3848 urb
->pipe_info
.ep_num
= ep_num
;
3849 urb
->pipe_info
.pipe_type
= ep_type
;
3850 urb
->pipe_info
.pipe_dir
= ep_dir
;
3851 urb
->pipe_info
.maxp
= maxp
;
3852 urb
->pipe_info
.maxp_mult
= maxp_mult
;
3856 * NOTE: This function will be removed once the peripheral controller code
3857 * is integrated and the driver is stable
3859 void dwc2_hcd_dump_state(struct dwc2_hsotg
*hsotg
)
3862 struct dwc2_host_chan
*chan
;
3863 struct dwc2_hcd_urb
*urb
;
3864 struct dwc2_qtd
*qtd
;
3870 num_channels
= hsotg
->params
.host_channels
;
3871 dev_dbg(hsotg
->dev
, "\n");
3873 "************************************************************\n");
3874 dev_dbg(hsotg
->dev
, "HCD State:\n");
3875 dev_dbg(hsotg
->dev
, " Num channels: %d\n", num_channels
);
3877 for (i
= 0; i
< num_channels
; i
++) {
3878 chan
= hsotg
->hc_ptr_array
[i
];
3879 dev_dbg(hsotg
->dev
, " Channel %d:\n", i
);
3881 " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
3882 chan
->dev_addr
, chan
->ep_num
, chan
->ep_is_in
);
3883 dev_dbg(hsotg
->dev
, " speed: %d\n", chan
->speed
);
3884 dev_dbg(hsotg
->dev
, " ep_type: %d\n", chan
->ep_type
);
3885 dev_dbg(hsotg
->dev
, " max_packet: %d\n", chan
->max_packet
);
3886 dev_dbg(hsotg
->dev
, " data_pid_start: %d\n",
3887 chan
->data_pid_start
);
3888 dev_dbg(hsotg
->dev
, " multi_count: %d\n", chan
->multi_count
);
3889 dev_dbg(hsotg
->dev
, " xfer_started: %d\n",
3890 chan
->xfer_started
);
3891 dev_dbg(hsotg
->dev
, " xfer_buf: %p\n", chan
->xfer_buf
);
3892 dev_dbg(hsotg
->dev
, " xfer_dma: %08lx\n",
3893 (unsigned long)chan
->xfer_dma
);
3894 dev_dbg(hsotg
->dev
, " xfer_len: %d\n", chan
->xfer_len
);
3895 dev_dbg(hsotg
->dev
, " xfer_count: %d\n", chan
->xfer_count
);
3896 dev_dbg(hsotg
->dev
, " halt_on_queue: %d\n",
3897 chan
->halt_on_queue
);
3898 dev_dbg(hsotg
->dev
, " halt_pending: %d\n",
3899 chan
->halt_pending
);
3900 dev_dbg(hsotg
->dev
, " halt_status: %d\n", chan
->halt_status
);
3901 dev_dbg(hsotg
->dev
, " do_split: %d\n", chan
->do_split
);
3902 dev_dbg(hsotg
->dev
, " complete_split: %d\n",
3903 chan
->complete_split
);
3904 dev_dbg(hsotg
->dev
, " hub_addr: %d\n", chan
->hub_addr
);
3905 dev_dbg(hsotg
->dev
, " hub_port: %d\n", chan
->hub_port
);
3906 dev_dbg(hsotg
->dev
, " xact_pos: %d\n", chan
->xact_pos
);
3907 dev_dbg(hsotg
->dev
, " requests: %d\n", chan
->requests
);
3908 dev_dbg(hsotg
->dev
, " qh: %p\n", chan
->qh
);
3910 if (chan
->xfer_started
) {
3911 u32 hfnum
, hcchar
, hctsiz
, hcint
, hcintmsk
;
3913 hfnum
= dwc2_readl(hsotg
, HFNUM
);
3914 hcchar
= dwc2_readl(hsotg
, HCCHAR(i
));
3915 hctsiz
= dwc2_readl(hsotg
, HCTSIZ(i
));
3916 hcint
= dwc2_readl(hsotg
, HCINT(i
));
3917 hcintmsk
= dwc2_readl(hsotg
, HCINTMSK(i
));
3918 dev_dbg(hsotg
->dev
, " hfnum: 0x%08x\n", hfnum
);
3919 dev_dbg(hsotg
->dev
, " hcchar: 0x%08x\n", hcchar
);
3920 dev_dbg(hsotg
->dev
, " hctsiz: 0x%08x\n", hctsiz
);
3921 dev_dbg(hsotg
->dev
, " hcint: 0x%08x\n", hcint
);
3922 dev_dbg(hsotg
->dev
, " hcintmsk: 0x%08x\n", hcintmsk
);
3925 if (!(chan
->xfer_started
&& chan
->qh
))
3928 list_for_each_entry(qtd
, &chan
->qh
->qtd_list
, qtd_list_entry
) {
3929 if (!qtd
->in_process
)
3932 dev_dbg(hsotg
->dev
, " URB Info:\n");
3933 dev_dbg(hsotg
->dev
, " qtd: %p, urb: %p\n",
3937 " Dev: %d, EP: %d %s\n",
3938 dwc2_hcd_get_dev_addr(&urb
->pipe_info
),
3939 dwc2_hcd_get_ep_num(&urb
->pipe_info
),
3940 dwc2_hcd_is_pipe_in(&urb
->pipe_info
) ?
3943 " Max packet size: %d (%d mult)\n",
3944 dwc2_hcd_get_maxp(&urb
->pipe_info
),
3945 dwc2_hcd_get_maxp_mult(&urb
->pipe_info
));
3947 " transfer_buffer: %p\n",
3950 " transfer_dma: %08lx\n",
3951 (unsigned long)urb
->dma
);
3953 " transfer_buffer_length: %d\n",
3955 dev_dbg(hsotg
->dev
, " actual_length: %d\n",
3956 urb
->actual_length
);
3961 dev_dbg(hsotg
->dev
, " non_periodic_channels: %d\n",
3962 hsotg
->non_periodic_channels
);
3963 dev_dbg(hsotg
->dev
, " periodic_channels: %d\n",
3964 hsotg
->periodic_channels
);
3965 dev_dbg(hsotg
->dev
, " periodic_usecs: %d\n", hsotg
->periodic_usecs
);
3966 np_tx_status
= dwc2_readl(hsotg
, GNPTXSTS
);
3967 dev_dbg(hsotg
->dev
, " NP Tx Req Queue Space Avail: %d\n",
3968 (np_tx_status
& TXSTS_QSPCAVAIL_MASK
) >> TXSTS_QSPCAVAIL_SHIFT
);
3969 dev_dbg(hsotg
->dev
, " NP Tx FIFO Space Avail: %d\n",
3970 (np_tx_status
& TXSTS_FSPCAVAIL_MASK
) >> TXSTS_FSPCAVAIL_SHIFT
);
3971 p_tx_status
= dwc2_readl(hsotg
, HPTXSTS
);
3972 dev_dbg(hsotg
->dev
, " P Tx Req Queue Space Avail: %d\n",
3973 (p_tx_status
& TXSTS_QSPCAVAIL_MASK
) >> TXSTS_QSPCAVAIL_SHIFT
);
3974 dev_dbg(hsotg
->dev
, " P Tx FIFO Space Avail: %d\n",
3975 (p_tx_status
& TXSTS_FSPCAVAIL_MASK
) >> TXSTS_FSPCAVAIL_SHIFT
);
3976 dwc2_dump_global_registers(hsotg
);
3977 dwc2_dump_host_registers(hsotg
);
3979 "************************************************************\n");
3980 dev_dbg(hsotg
->dev
, "\n");
3984 struct wrapper_priv_data
{
3985 struct dwc2_hsotg
*hsotg
;
3988 /* Gets the dwc2_hsotg from a usb_hcd */
3989 static struct dwc2_hsotg
*dwc2_hcd_to_hsotg(struct usb_hcd
*hcd
)
3991 struct wrapper_priv_data
*p
;
3993 p
= (struct wrapper_priv_data
*)&hcd
->hcd_priv
;
3998 * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
4000 * This will get the dwc2_tt structure (and ttport) associated with the given
4001 * context (which is really just a struct urb pointer).
4003 * The first time this is called for a given TT we allocate memory for our
4004 * structure. When everyone is done and has called dwc2_host_put_tt_info()
4005 * then the refcount for the structure will go to 0 and we'll free it.
4007 * @hsotg: The HCD state structure for the DWC OTG controller.
4008 * @context: The priv pointer from a struct dwc2_hcd_urb.
4009 * @mem_flags: Flags for allocating memory.
4010 * @ttport: We'll return this device's port number here. That's used to
4011 * reference into the bitmap if we're on a multi_tt hub.
4013 * Return: a pointer to a struct dwc2_tt. Don't forget to call
4014 * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure.
4017 struct dwc2_tt
*dwc2_host_get_tt_info(struct dwc2_hsotg
*hsotg
, void *context
,
4018 gfp_t mem_flags
, int *ttport
)
4020 struct urb
*urb
= context
;
4021 struct dwc2_tt
*dwc_tt
= NULL
;
4024 *ttport
= urb
->dev
->ttport
;
4026 dwc_tt
= urb
->dev
->tt
->hcpriv
;
4031 * For single_tt we need one schedule. For multi_tt
4032 * we need one per port.
4034 bitmap_size
= DWC2_ELEMENTS_PER_LS_BITMAP
*
4035 sizeof(dwc_tt
->periodic_bitmaps
[0]);
4036 if (urb
->dev
->tt
->multi
)
4037 bitmap_size
*= urb
->dev
->tt
->hub
->maxchild
;
4039 dwc_tt
= kzalloc(sizeof(*dwc_tt
) + bitmap_size
,
4044 dwc_tt
->usb_tt
= urb
->dev
->tt
;
4045 dwc_tt
->usb_tt
->hcpriv
= dwc_tt
;
4055 * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info()
4057 * Frees resources allocated by dwc2_host_get_tt_info() if all current holders
4058 * of the structure are done.
4060 * It's OK to call this with NULL.
4062 * @hsotg: The HCD state structure for the DWC OTG controller.
4063 * @dwc_tt: The pointer returned by dwc2_host_get_tt_info.
4065 void dwc2_host_put_tt_info(struct dwc2_hsotg
*hsotg
, struct dwc2_tt
*dwc_tt
)
4067 /* Model kfree and make put of NULL a no-op */
4071 WARN_ON(dwc_tt
->refcount
< 1);
4074 if (!dwc_tt
->refcount
) {
4075 dwc_tt
->usb_tt
->hcpriv
= NULL
;
4080 int dwc2_host_get_speed(struct dwc2_hsotg
*hsotg
, void *context
)
4082 struct urb
*urb
= context
;
4084 return urb
->dev
->speed
;
4087 static void dwc2_allocate_bus_bandwidth(struct usb_hcd
*hcd
, u16 bw
,
4090 struct usb_bus
*bus
= hcd_to_bus(hcd
);
4093 bus
->bandwidth_allocated
+= bw
/ urb
->interval
;
4094 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
)
4095 bus
->bandwidth_isoc_reqs
++;
4097 bus
->bandwidth_int_reqs
++;
4100 static void dwc2_free_bus_bandwidth(struct usb_hcd
*hcd
, u16 bw
,
4103 struct usb_bus
*bus
= hcd_to_bus(hcd
);
4106 bus
->bandwidth_allocated
-= bw
/ urb
->interval
;
4107 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
)
4108 bus
->bandwidth_isoc_reqs
--;
4110 bus
->bandwidth_int_reqs
--;
4114 * Sets the final status of an URB and returns it to the upper layer. Any
4115 * required cleanup of the URB is performed.
4117 * Must be called with interrupt disabled and spinlock held
4119 void dwc2_host_complete(struct dwc2_hsotg
*hsotg
, struct dwc2_qtd
*qtd
,
4126 dev_dbg(hsotg
->dev
, "## %s: qtd is NULL ##\n", __func__
);
4131 dev_dbg(hsotg
->dev
, "## %s: qtd->urb is NULL ##\n", __func__
);
4135 urb
= qtd
->urb
->priv
;
4137 dev_dbg(hsotg
->dev
, "## %s: urb->priv is NULL ##\n", __func__
);
4141 urb
->actual_length
= dwc2_hcd_urb_get_actual_length(qtd
->urb
);
4144 dev_vdbg(hsotg
->dev
,
4145 "%s: urb %p device %d ep %d-%s status %d actual %d\n",
4146 __func__
, urb
, usb_pipedevice(urb
->pipe
),
4147 usb_pipeendpoint(urb
->pipe
),
4148 usb_pipein(urb
->pipe
) ? "IN" : "OUT", status
,
4149 urb
->actual_length
);
4151 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
) {
4152 if (!hsotg
->params
.dma_desc_enable
)
4153 urb
->start_frame
= qtd
->qh
->start_active_frame
;
4154 urb
->error_count
= dwc2_hcd_urb_get_error_count(qtd
->urb
);
4155 for (i
= 0; i
< urb
->number_of_packets
; ++i
) {
4156 urb
->iso_frame_desc
[i
].actual_length
=
4157 dwc2_hcd_urb_get_iso_desc_actual_length(
4159 urb
->iso_frame_desc
[i
].status
=
4160 dwc2_hcd_urb_get_iso_desc_status(qtd
->urb
, i
);
4164 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
&& dbg_perio()) {
4165 for (i
= 0; i
< urb
->number_of_packets
; i
++)
4166 dev_vdbg(hsotg
->dev
, " ISO Desc %d status %d\n",
4167 i
, urb
->iso_frame_desc
[i
].status
);
4170 urb
->status
= status
;
4172 if ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) &&
4173 urb
->actual_length
< urb
->transfer_buffer_length
)
4174 urb
->status
= -EREMOTEIO
;
4177 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
||
4178 usb_pipetype(urb
->pipe
) == PIPE_INTERRUPT
) {
4179 struct usb_host_endpoint
*ep
= urb
->ep
;
4182 dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg
),
4183 dwc2_hcd_get_ep_bandwidth(hsotg
, ep
),
4187 usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg
), urb
);
4192 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg
), urb
, status
);
4196 * Work queue function for starting the HCD when A-Cable is connected
4198 static void dwc2_hcd_start_func(struct work_struct
*work
)
4200 struct dwc2_hsotg
*hsotg
= container_of(work
, struct dwc2_hsotg
,
4203 dev_dbg(hsotg
->dev
, "%s() %p\n", __func__
, hsotg
);
4204 dwc2_host_start(hsotg
);
4208 * Reset work queue function
4210 static void dwc2_hcd_reset_func(struct work_struct
*work
)
4212 struct dwc2_hsotg
*hsotg
= container_of(work
, struct dwc2_hsotg
,
4214 unsigned long flags
;
4217 dev_dbg(hsotg
->dev
, "USB RESET function called\n");
4219 spin_lock_irqsave(&hsotg
->lock
, flags
);
4221 hprt0
= dwc2_read_hprt0(hsotg
);
4222 hprt0
&= ~HPRT0_RST
;
4223 dwc2_writel(hsotg
, hprt0
, HPRT0
);
4224 hsotg
->flags
.b
.port_reset_change
= 1;
4226 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4229 static void dwc2_hcd_phy_reset_func(struct work_struct
*work
)
4231 struct dwc2_hsotg
*hsotg
= container_of(work
, struct dwc2_hsotg
,
4235 ret
= phy_reset(hsotg
->phy
);
4237 dev_warn(hsotg
->dev
, "PHY reset failed\n");
4241 * =========================================================================
4242 * Linux HC Driver Functions
4243 * =========================================================================
4247 * Initializes the DWC_otg controller and its root hub and prepares it for host
4248 * mode operation. Activates the root port. Returns 0 on success and a negative
4249 * error code on failure.
4251 static int _dwc2_hcd_start(struct usb_hcd
*hcd
)
4253 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4254 struct usb_bus
*bus
= hcd_to_bus(hcd
);
4255 unsigned long flags
;
4259 dev_dbg(hsotg
->dev
, "DWC OTG HCD START\n");
4261 spin_lock_irqsave(&hsotg
->lock
, flags
);
4262 hsotg
->lx_state
= DWC2_L0
;
4263 hcd
->state
= HC_STATE_RUNNING
;
4264 set_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4266 if (dwc2_is_device_mode(hsotg
)) {
4267 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4268 return 0; /* why 0 ?? */
4271 dwc2_hcd_reinit(hsotg
);
4273 hprt0
= dwc2_read_hprt0(hsotg
);
4274 /* Has vbus power been turned on in dwc2_core_host_init ? */
4275 if (hprt0
& HPRT0_PWR
) {
4276 /* Enable external vbus supply before resuming root hub */
4277 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4278 ret
= dwc2_vbus_supply_init(hsotg
);
4281 spin_lock_irqsave(&hsotg
->lock
, flags
);
4284 /* Initialize and connect root hub if one is not already attached */
4285 if (bus
->root_hub
) {
4286 dev_dbg(hsotg
->dev
, "DWC OTG HCD Has Root Hub\n");
4287 /* Inform the HUB driver to resume */
4288 usb_hcd_resume_root_hub(hcd
);
4291 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4297 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
4300 static void _dwc2_hcd_stop(struct usb_hcd
*hcd
)
4302 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4303 unsigned long flags
;
4306 /* Turn off all host-specific interrupts */
4307 dwc2_disable_host_interrupts(hsotg
);
4309 /* Wait for interrupt processing to finish */
4310 synchronize_irq(hcd
->irq
);
4312 spin_lock_irqsave(&hsotg
->lock
, flags
);
4313 hprt0
= dwc2_read_hprt0(hsotg
);
4314 /* Ensure hcd is disconnected */
4315 dwc2_hcd_disconnect(hsotg
, true);
4316 dwc2_hcd_stop(hsotg
);
4317 hsotg
->lx_state
= DWC2_L3
;
4318 hcd
->state
= HC_STATE_HALT
;
4319 clear_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4320 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4322 /* keep balanced supply init/exit by checking HPRT0_PWR */
4323 if (hprt0
& HPRT0_PWR
)
4324 dwc2_vbus_supply_exit(hsotg
);
4326 usleep_range(1000, 3000);
4329 static int _dwc2_hcd_suspend(struct usb_hcd
*hcd
)
4331 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4332 unsigned long flags
;
4335 spin_lock_irqsave(&hsotg
->lock
, flags
);
4337 if (dwc2_is_device_mode(hsotg
))
4340 if (hsotg
->lx_state
!= DWC2_L0
)
4343 if (!HCD_HW_ACCESSIBLE(hcd
))
4346 if (hsotg
->op_state
== OTG_STATE_B_PERIPHERAL
)
4349 if (hsotg
->bus_suspended
)
4350 goto skip_power_saving
;
4352 if (hsotg
->flags
.b
.port_connect_status
== 0)
4353 goto skip_power_saving
;
4355 switch (hsotg
->params
.power_down
) {
4356 case DWC2_POWER_DOWN_PARAM_PARTIAL
:
4357 /* Enter partial_power_down */
4358 ret
= dwc2_enter_partial_power_down(hsotg
);
4361 "enter partial_power_down failed\n");
4362 /* After entering suspend, hardware is not accessible */
4363 clear_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4365 case DWC2_POWER_DOWN_PARAM_HIBERNATION
:
4366 /* Enter hibernation */
4367 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4368 ret
= dwc2_enter_hibernation(hsotg
, 1);
4370 dev_err(hsotg
->dev
, "enter hibernation failed\n");
4371 spin_lock_irqsave(&hsotg
->lock
, flags
);
4373 /* After entering suspend, hardware is not accessible */
4374 clear_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4376 case DWC2_POWER_DOWN_PARAM_NONE
:
4378 * If not hibernation nor partial power down are supported,
4379 * clock gating is used to save power.
4381 if (!hsotg
->params
.no_clock_gating
) {
4382 dwc2_host_enter_clock_gating(hsotg
);
4384 /* After entering suspend, hardware is not accessible */
4385 clear_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4389 goto skip_power_saving
;
4392 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4393 dwc2_vbus_supply_exit(hsotg
);
4394 spin_lock_irqsave(&hsotg
->lock
, flags
);
4396 /* Ask phy to be suspended */
4397 if (!IS_ERR_OR_NULL(hsotg
->uphy
)) {
4398 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4399 usb_phy_set_suspend(hsotg
->uphy
, true);
4400 spin_lock_irqsave(&hsotg
->lock
, flags
);
4404 hsotg
->lx_state
= DWC2_L2
;
4406 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4411 static int _dwc2_hcd_resume(struct usb_hcd
*hcd
)
4413 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4414 unsigned long flags
;
4418 spin_lock_irqsave(&hsotg
->lock
, flags
);
4420 if (dwc2_is_device_mode(hsotg
))
4423 if (hsotg
->lx_state
!= DWC2_L2
)
4426 hprt0
= dwc2_read_hprt0(hsotg
);
4429 * Added port connection status checking which prevents exiting from
4430 * Partial Power Down mode from _dwc2_hcd_resume() if not in Partial
4433 if (hprt0
& HPRT0_CONNSTS
) {
4434 hsotg
->lx_state
= DWC2_L0
;
4438 switch (hsotg
->params
.power_down
) {
4439 case DWC2_POWER_DOWN_PARAM_PARTIAL
:
4440 ret
= dwc2_exit_partial_power_down(hsotg
, 0, true);
4443 "exit partial_power_down failed\n");
4445 * Set HW accessible bit before powering on the controller
4446 * since an interrupt may rise.
4448 set_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4450 case DWC2_POWER_DOWN_PARAM_HIBERNATION
:
4451 ret
= dwc2_exit_hibernation(hsotg
, 0, 0, 1);
4453 dev_err(hsotg
->dev
, "exit hibernation failed.\n");
4456 * Set HW accessible bit before powering on the controller
4457 * since an interrupt may rise.
4459 set_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4461 case DWC2_POWER_DOWN_PARAM_NONE
:
4463 * If not hibernation nor partial power down are supported,
4464 * port resume is done using the clock gating programming flow.
4466 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4467 dwc2_host_exit_clock_gating(hsotg
, 0);
4470 * Initialize the Core for Host mode, as after system resume
4471 * the global interrupts are disabled.
4473 dwc2_core_init(hsotg
, false);
4474 dwc2_enable_global_interrupts(hsotg
);
4475 dwc2_hcd_reinit(hsotg
);
4476 spin_lock_irqsave(&hsotg
->lock
, flags
);
4479 * Set HW accessible bit before powering on the controller
4480 * since an interrupt may rise.
4482 set_bit(HCD_FLAG_HW_ACCESSIBLE
, &hcd
->flags
);
4485 hsotg
->lx_state
= DWC2_L0
;
4489 /* Change Root port status, as port status change occurred after resume.*/
4490 hsotg
->flags
.b
.port_suspend_change
= 1;
4493 * Enable power if not already done.
4494 * This must not be spinlocked since duration
4495 * of this call is unknown.
4497 if (!IS_ERR_OR_NULL(hsotg
->uphy
)) {
4498 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4499 usb_phy_set_suspend(hsotg
->uphy
, false);
4500 spin_lock_irqsave(&hsotg
->lock
, flags
);
4503 /* Enable external vbus supply after resuming the port. */
4504 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4505 dwc2_vbus_supply_init(hsotg
);
4507 /* Wait for controller to correctly update D+/D- level */
4508 usleep_range(3000, 5000);
4509 spin_lock_irqsave(&hsotg
->lock
, flags
);
4512 * Clear Port Enable and Port Status changes.
4513 * Enable Port Power.
4515 dwc2_writel(hsotg
, HPRT0_PWR
| HPRT0_CONNDET
|
4516 HPRT0_ENACHG
, HPRT0
);
4518 /* Wait for controller to detect Port Connect */
4519 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4520 usleep_range(5000, 7000);
4521 spin_lock_irqsave(&hsotg
->lock
, flags
);
4523 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4528 /* Returns the current frame number */
/* hc_driver callback: returns the current frame number */
static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);

	return dwc2_hcd_get_frame_number(hsotg);
}
4536 static void dwc2_dump_urb_info(struct usb_hcd
*hcd
, struct urb
*urb
,
4539 #ifdef VERBOSE_DEBUG
4540 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4541 char *pipetype
= NULL
;
4544 dev_vdbg(hsotg
->dev
, "%s, urb %p\n", fn_name
, urb
);
4545 dev_vdbg(hsotg
->dev
, " Device address: %d\n",
4546 usb_pipedevice(urb
->pipe
));
4547 dev_vdbg(hsotg
->dev
, " Endpoint: %d, %s\n",
4548 usb_pipeendpoint(urb
->pipe
),
4549 usb_pipein(urb
->pipe
) ? "IN" : "OUT");
4551 switch (usb_pipetype(urb
->pipe
)) {
4553 pipetype
= "CONTROL";
4558 case PIPE_INTERRUPT
:
4559 pipetype
= "INTERRUPT";
4561 case PIPE_ISOCHRONOUS
:
4562 pipetype
= "ISOCHRONOUS";
4566 dev_vdbg(hsotg
->dev
, " Endpoint type: %s %s (%s)\n", pipetype
,
4567 usb_urb_dir_in(urb
) ? "IN" : "OUT", usb_pipein(urb
->pipe
) ?
4570 switch (urb
->dev
->speed
) {
4571 case USB_SPEED_HIGH
:
4574 case USB_SPEED_FULL
:
4585 dev_vdbg(hsotg
->dev
, " Speed: %s\n", speed
);
4586 dev_vdbg(hsotg
->dev
, " Max packet size: %d (%d mult)\n",
4587 usb_endpoint_maxp(&urb
->ep
->desc
),
4588 usb_endpoint_maxp_mult(&urb
->ep
->desc
));
4590 dev_vdbg(hsotg
->dev
, " Data buffer length: %d\n",
4591 urb
->transfer_buffer_length
);
4592 dev_vdbg(hsotg
->dev
, " Transfer buffer: %p, Transfer DMA: %08lx\n",
4593 urb
->transfer_buffer
, (unsigned long)urb
->transfer_dma
);
4594 dev_vdbg(hsotg
->dev
, " Setup buffer: %p, Setup DMA: %08lx\n",
4595 urb
->setup_packet
, (unsigned long)urb
->setup_dma
);
4596 dev_vdbg(hsotg
->dev
, " Interval: %d\n", urb
->interval
);
4598 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
) {
4601 for (i
= 0; i
< urb
->number_of_packets
; i
++) {
4602 dev_vdbg(hsotg
->dev
, " ISO Desc %d:\n", i
);
4603 dev_vdbg(hsotg
->dev
, " offset: %d, length %d\n",
4604 urb
->iso_frame_desc
[i
].offset
,
4605 urb
->iso_frame_desc
[i
].length
);
4612 * Starts processing a USB transfer request specified by a USB Request Block
4613 * (URB). mem_flags indicates the type of memory allocation to use while
4614 * processing this URB.
4616 static int _dwc2_hcd_urb_enqueue(struct usb_hcd
*hcd
, struct urb
*urb
,
4619 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4620 struct usb_host_endpoint
*ep
= urb
->ep
;
4621 struct dwc2_hcd_urb
*dwc2_urb
;
4624 int alloc_bandwidth
= 0;
4628 unsigned long flags
;
4630 bool qh_allocated
= false;
4631 struct dwc2_qtd
*qtd
;
4632 struct dwc2_gregs_backup
*gr
;
4634 gr
= &hsotg
->gr_backup
;
4637 dev_vdbg(hsotg
->dev
, "DWC OTG HCD URB Enqueue\n");
4638 dwc2_dump_urb_info(hcd
, urb
, "urb_enqueue");
4641 if (hsotg
->hibernated
) {
4642 if (gr
->gotgctl
& GOTGCTL_CURMODE_HOST
)
4643 retval
= dwc2_exit_hibernation(hsotg
, 0, 0, 1);
4645 retval
= dwc2_exit_hibernation(hsotg
, 0, 0, 0);
4649 "exit hibernation failed.\n");
4652 if (hsotg
->in_ppd
) {
4653 retval
= dwc2_exit_partial_power_down(hsotg
, 0, true);
4656 "exit partial_power_down failed\n");
4659 if (hsotg
->params
.power_down
== DWC2_POWER_DOWN_PARAM_NONE
&&
4660 hsotg
->bus_suspended
&& !hsotg
->params
.no_clock_gating
) {
4661 if (dwc2_is_device_mode(hsotg
))
4662 dwc2_gadget_exit_clock_gating(hsotg
, 0);
4664 dwc2_host_exit_clock_gating(hsotg
, 0);
4670 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
||
4671 usb_pipetype(urb
->pipe
) == PIPE_INTERRUPT
) {
4672 spin_lock_irqsave(&hsotg
->lock
, flags
);
4673 if (!dwc2_hcd_is_bandwidth_allocated(hsotg
, ep
))
4674 alloc_bandwidth
= 1;
4675 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4678 switch (usb_pipetype(urb
->pipe
)) {
4680 ep_type
= USB_ENDPOINT_XFER_CONTROL
;
4682 case PIPE_ISOCHRONOUS
:
4683 ep_type
= USB_ENDPOINT_XFER_ISOC
;
4686 ep_type
= USB_ENDPOINT_XFER_BULK
;
4688 case PIPE_INTERRUPT
:
4689 ep_type
= USB_ENDPOINT_XFER_INT
;
4693 dwc2_urb
= dwc2_hcd_urb_alloc(hsotg
, urb
->number_of_packets
,
4698 dwc2_hcd_urb_set_pipeinfo(hsotg
, dwc2_urb
, usb_pipedevice(urb
->pipe
),
4699 usb_pipeendpoint(urb
->pipe
), ep_type
,
4700 usb_pipein(urb
->pipe
),
4701 usb_endpoint_maxp(&ep
->desc
),
4702 usb_endpoint_maxp_mult(&ep
->desc
));
4704 buf
= urb
->transfer_buffer
;
4706 if (hcd_uses_dma(hcd
)) {
4707 if (!buf
&& (urb
->transfer_dma
& 3)) {
4709 "%s: unaligned transfer with no transfer_buffer",
4716 if (!(urb
->transfer_flags
& URB_NO_INTERRUPT
))
4717 tflags
|= URB_GIVEBACK_ASAP
;
4718 if (urb
->transfer_flags
& URB_ZERO_PACKET
)
4719 tflags
|= URB_SEND_ZERO_PACKET
;
4721 dwc2_urb
->priv
= urb
;
4722 dwc2_urb
->buf
= buf
;
4723 dwc2_urb
->dma
= urb
->transfer_dma
;
4724 dwc2_urb
->length
= urb
->transfer_buffer_length
;
4725 dwc2_urb
->setup_packet
= urb
->setup_packet
;
4726 dwc2_urb
->setup_dma
= urb
->setup_dma
;
4727 dwc2_urb
->flags
= tflags
;
4728 dwc2_urb
->interval
= urb
->interval
;
4729 dwc2_urb
->status
= -EINPROGRESS
;
4731 for (i
= 0; i
< urb
->number_of_packets
; ++i
)
4732 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb
, i
,
4733 urb
->iso_frame_desc
[i
].offset
,
4734 urb
->iso_frame_desc
[i
].length
);
4736 urb
->hcpriv
= dwc2_urb
;
4737 qh
= (struct dwc2_qh
*)ep
->hcpriv
;
4738 /* Create QH for the endpoint if it doesn't exist */
4740 qh
= dwc2_hcd_qh_create(hsotg
, dwc2_urb
, mem_flags
);
4746 qh_allocated
= true;
4749 qtd
= kzalloc(sizeof(*qtd
), mem_flags
);
4755 spin_lock_irqsave(&hsotg
->lock
, flags
);
4756 retval
= usb_hcd_link_urb_to_ep(hcd
, urb
);
4760 retval
= dwc2_hcd_urb_enqueue(hsotg
, dwc2_urb
, qh
, qtd
);
4764 if (alloc_bandwidth
) {
4765 dwc2_allocate_bus_bandwidth(hcd
,
4766 dwc2_hcd_get_ep_bandwidth(hsotg
, ep
),
4770 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4775 dwc2_urb
->priv
= NULL
;
4776 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
4777 if (qh_allocated
&& qh
->channel
&& qh
->channel
->qh
== qh
)
4778 qh
->channel
->qh
= NULL
;
4781 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4785 struct dwc2_qtd
*qtd2
, *qtd2_tmp
;
4788 dwc2_hcd_qh_unlink(hsotg
, qh
);
4789 /* Free each QTD in the QH's QTD list */
4790 list_for_each_entry_safe(qtd2
, qtd2_tmp
, &qh
->qtd_list
,
4792 dwc2_hcd_qtd_unlink_and_free(hsotg
, qtd2
, qh
);
4793 dwc2_hcd_qh_free(hsotg
, qh
);
4802 * Aborts/cancels a USB transfer request. Always returns 0 to indicate success.
4804 static int _dwc2_hcd_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
,
4807 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4809 unsigned long flags
;
4811 dev_dbg(hsotg
->dev
, "DWC OTG HCD URB Dequeue\n");
4812 dwc2_dump_urb_info(hcd
, urb
, "urb_dequeue");
4814 spin_lock_irqsave(&hsotg
->lock
, flags
);
4816 rc
= usb_hcd_check_unlink_urb(hcd
, urb
, status
);
4821 dev_dbg(hsotg
->dev
, "## urb->hcpriv is NULL ##\n");
4825 rc
= dwc2_hcd_urb_dequeue(hsotg
, urb
->hcpriv
);
4827 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
4832 /* Higher layer software sets URB status */
4833 spin_unlock(&hsotg
->lock
);
4834 usb_hcd_giveback_urb(hcd
, urb
, status
);
4835 spin_lock(&hsotg
->lock
);
4837 dev_dbg(hsotg
->dev
, "Called usb_hcd_giveback_urb()\n");
4838 dev_dbg(hsotg
->dev
, " urb->status = %d\n", urb
->status
);
4840 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4846 * Frees resources in the DWC_otg controller related to a given endpoint. Also
4847 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
4848 * must already be dequeued.
4850 static void _dwc2_hcd_endpoint_disable(struct usb_hcd
*hcd
,
4851 struct usb_host_endpoint
*ep
)
4853 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4856 "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
4857 ep
->desc
.bEndpointAddress
, ep
->hcpriv
);
4858 dwc2_hcd_endpoint_disable(hsotg
, ep
, 250);
4862 * Resets endpoint specific parameter values, in current version used to reset
4863 * the data toggle (as a WA). This function can be called from usb_clear_halt
4866 static void _dwc2_hcd_endpoint_reset(struct usb_hcd
*hcd
,
4867 struct usb_host_endpoint
*ep
)
4869 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4870 unsigned long flags
;
4873 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
4874 ep
->desc
.bEndpointAddress
);
4876 spin_lock_irqsave(&hsotg
->lock
, flags
);
4877 dwc2_hcd_endpoint_reset(hsotg
, ep
);
4878 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4882 * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
4883 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
4886 * This function is called by the USB core when an interrupt occurs
4888 static irqreturn_t
_dwc2_hcd_irq(struct usb_hcd
*hcd
)
4890 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4892 return dwc2_handle_hcd_intr(hsotg
);
/*
 * Creates Status Change bitmap for the root hub and root port. The bitmap is
 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
 * is the status change indicator for the single root port. Returns 1 if either
 * change indicator is 1, otherwise returns 0.
 */
static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);

	buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1;
	return buf[0] != 0;
}
4909 /* Handles hub class-specific requests */
4910 static int _dwc2_hcd_hub_control(struct usb_hcd
*hcd
, u16 typereq
, u16 wvalue
,
4911 u16 windex
, char *buf
, u16 wlength
)
4913 int retval
= dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd
), typereq
,
4914 wvalue
, windex
, buf
, wlength
);
4918 /* Handles hub TT buffer clear completions */
4919 static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd
*hcd
,
4920 struct usb_host_endpoint
*ep
)
4922 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4924 unsigned long flags
;
4930 spin_lock_irqsave(&hsotg
->lock
, flags
);
4931 qh
->tt_buffer_dirty
= 0;
4933 if (hsotg
->flags
.b
.port_connect_status
)
4934 dwc2_hcd_queue_transactions(hsotg
, DWC2_TRANSACTION_ALL
);
4936 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
4940 * HPRT0_SPD_HIGH_SPEED: high speed
4941 * HPRT0_SPD_FULL_SPEED: full speed
4943 static void dwc2_change_bus_speed(struct usb_hcd
*hcd
, int speed
)
4945 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4947 if (hsotg
->params
.speed
== speed
)
4950 hsotg
->params
.speed
= speed
;
4951 queue_work(hsotg
->wq_otg
, &hsotg
->wf_otg
);
4954 static void dwc2_free_dev(struct usb_hcd
*hcd
, struct usb_device
*udev
)
4956 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4958 if (!hsotg
->params
.change_speed_quirk
)
4962 * On removal, set speed to default high-speed.
4964 if (udev
->parent
&& udev
->parent
->speed
> USB_SPEED_UNKNOWN
&&
4965 udev
->parent
->speed
< USB_SPEED_HIGH
) {
4966 dev_info(hsotg
->dev
, "Set speed to default high-speed\n");
4967 dwc2_change_bus_speed(hcd
, HPRT0_SPD_HIGH_SPEED
);
4971 static int dwc2_reset_device(struct usb_hcd
*hcd
, struct usb_device
*udev
)
4973 struct dwc2_hsotg
*hsotg
= dwc2_hcd_to_hsotg(hcd
);
4975 if (!hsotg
->params
.change_speed_quirk
)
4978 if (udev
->speed
== USB_SPEED_HIGH
) {
4979 dev_info(hsotg
->dev
, "Set speed to high-speed\n");
4980 dwc2_change_bus_speed(hcd
, HPRT0_SPD_HIGH_SPEED
);
4981 } else if ((udev
->speed
== USB_SPEED_FULL
||
4982 udev
->speed
== USB_SPEED_LOW
)) {
4984 * Change speed setting to full-speed if there's
4985 * a full-speed or low-speed device plugged in.
4987 dev_info(hsotg
->dev
, "Set speed to full-speed\n");
4988 dwc2_change_bus_speed(hcd
, HPRT0_SPD_FULL_SPEED
);
4994 static struct hc_driver dwc2_hc_driver
= {
4995 .description
= "dwc2_hsotg",
4996 .product_desc
= "DWC OTG Controller",
4997 .hcd_priv_size
= sizeof(struct wrapper_priv_data
),
4999 .irq
= _dwc2_hcd_irq
,
5000 .flags
= HCD_MEMORY
| HCD_USB2
| HCD_BH
,
5002 .start
= _dwc2_hcd_start
,
5003 .stop
= _dwc2_hcd_stop
,
5004 .urb_enqueue
= _dwc2_hcd_urb_enqueue
,
5005 .urb_dequeue
= _dwc2_hcd_urb_dequeue
,
5006 .endpoint_disable
= _dwc2_hcd_endpoint_disable
,
5007 .endpoint_reset
= _dwc2_hcd_endpoint_reset
,
5008 .get_frame_number
= _dwc2_hcd_get_frame_number
,
5010 .hub_status_data
= _dwc2_hcd_hub_status_data
,
5011 .hub_control
= _dwc2_hcd_hub_control
,
5012 .clear_tt_buffer_complete
= _dwc2_hcd_clear_tt_buffer_complete
,
5014 .bus_suspend
= _dwc2_hcd_suspend
,
5015 .bus_resume
= _dwc2_hcd_resume
,
5017 .map_urb_for_dma
= dwc2_map_urb_for_dma
,
5018 .unmap_urb_for_dma
= dwc2_unmap_urb_for_dma
,
5022 * Frees secondary storage associated with the dwc2_hsotg structure contained
5023 * in the struct usb_hcd field
5025 static void dwc2_hcd_free(struct dwc2_hsotg
*hsotg
)
5031 dev_dbg(hsotg
->dev
, "DWC OTG HCD FREE\n");
5033 /* Free memory for QH/QTD lists */
5034 dwc2_qh_list_free(hsotg
, &hsotg
->non_periodic_sched_inactive
);
5035 dwc2_qh_list_free(hsotg
, &hsotg
->non_periodic_sched_waiting
);
5036 dwc2_qh_list_free(hsotg
, &hsotg
->non_periodic_sched_active
);
5037 dwc2_qh_list_free(hsotg
, &hsotg
->periodic_sched_inactive
);
5038 dwc2_qh_list_free(hsotg
, &hsotg
->periodic_sched_ready
);
5039 dwc2_qh_list_free(hsotg
, &hsotg
->periodic_sched_assigned
);
5040 dwc2_qh_list_free(hsotg
, &hsotg
->periodic_sched_queued
);
5042 /* Free memory for the host channels */
5043 for (i
= 0; i
< MAX_EPS_CHANNELS
; i
++) {
5044 struct dwc2_host_chan
*chan
= hsotg
->hc_ptr_array
[i
];
5047 dev_dbg(hsotg
->dev
, "HCD Free channel #%i, chan=%p\n",
5049 hsotg
->hc_ptr_array
[i
] = NULL
;
5054 if (hsotg
->params
.host_dma
) {
5055 if (hsotg
->status_buf
) {
5056 dma_free_coherent(hsotg
->dev
, DWC2_HCD_STATUS_BUF_SIZE
,
5058 hsotg
->status_buf_dma
);
5059 hsotg
->status_buf
= NULL
;
5062 kfree(hsotg
->status_buf
);
5063 hsotg
->status_buf
= NULL
;
5066 ahbcfg
= dwc2_readl(hsotg
, GAHBCFG
);
5068 /* Disable all interrupts */
5069 ahbcfg
&= ~GAHBCFG_GLBL_INTR_EN
;
5070 dwc2_writel(hsotg
, ahbcfg
, GAHBCFG
);
5071 dwc2_writel(hsotg
, 0, GINTMSK
);
5073 if (hsotg
->hw_params
.snpsid
>= DWC2_CORE_REV_3_00a
) {
5074 dctl
= dwc2_readl(hsotg
, DCTL
);
5075 dctl
|= DCTL_SFTDISCON
;
5076 dwc2_writel(hsotg
, dctl
, DCTL
);
5079 if (hsotg
->wq_otg
) {
5080 if (!cancel_work_sync(&hsotg
->wf_otg
))
5081 flush_workqueue(hsotg
->wq_otg
);
5082 destroy_workqueue(hsotg
->wq_otg
);
5085 cancel_work_sync(&hsotg
->phy_reset_work
);
5087 del_timer(&hsotg
->wkp_timer
);
/* Quiesces host-mode interrupts, then frees all HCD resources */
static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
{
	/* Turn off all host-specific interrupts */
	dwc2_disable_host_interrupts(hsotg);

	dwc2_hcd_free(hsotg);
}
5099 * Initializes the HCD. This function allocates memory for and initializes the
5100 * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
5101 * USB bus with the core and calls the hc_driver->start() function. It returns
5102 * a negative error on failure.
5104 int dwc2_hcd_init(struct dwc2_hsotg
*hsotg
)
5106 struct platform_device
*pdev
= to_platform_device(hsotg
->dev
);
5107 struct resource
*res
;
5108 struct usb_hcd
*hcd
;
5109 struct dwc2_host_chan
*channel
;
5111 int i
, num_channels
;
5117 dev_dbg(hsotg
->dev
, "DWC OTG HCD INIT\n");
5121 hcfg
= dwc2_readl(hsotg
, HCFG
);
5122 dev_dbg(hsotg
->dev
, "hcfg=%08x\n", hcfg
);
5124 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5125 hsotg
->frame_num_array
= kcalloc(FRAME_NUM_ARRAY_SIZE
,
5126 sizeof(*hsotg
->frame_num_array
),
5128 if (!hsotg
->frame_num_array
)
5130 hsotg
->last_frame_num_array
=
5131 kcalloc(FRAME_NUM_ARRAY_SIZE
,
5132 sizeof(*hsotg
->last_frame_num_array
), GFP_KERNEL
);
5133 if (!hsotg
->last_frame_num_array
)
5136 hsotg
->last_frame_num
= HFNUM_MAX_FRNUM
;
5138 /* Check if the bus driver or platform code has setup a dma_mask */
5139 if (hsotg
->params
.host_dma
&&
5140 !hsotg
->dev
->dma_mask
) {
5141 dev_warn(hsotg
->dev
,
5142 "dma_mask not set, disabling DMA\n");
5143 hsotg
->params
.host_dma
= false;
5144 hsotg
->params
.dma_desc_enable
= false;
5147 /* Set device flags indicating whether the HCD supports DMA */
5148 if (hsotg
->params
.host_dma
) {
5149 if (dma_set_mask(hsotg
->dev
, DMA_BIT_MASK(32)) < 0)
5150 dev_warn(hsotg
->dev
, "can't set DMA mask\n");
5151 if (dma_set_coherent_mask(hsotg
->dev
, DMA_BIT_MASK(32)) < 0)
5152 dev_warn(hsotg
->dev
, "can't set coherent DMA mask\n");
5155 if (hsotg
->params
.change_speed_quirk
) {
5156 dwc2_hc_driver
.free_dev
= dwc2_free_dev
;
5157 dwc2_hc_driver
.reset_device
= dwc2_reset_device
;
5160 if (hsotg
->params
.host_dma
)
5161 dwc2_hc_driver
.flags
|= HCD_DMA
;
5163 hcd
= usb_create_hcd(&dwc2_hc_driver
, hsotg
->dev
, dev_name(hsotg
->dev
));
5169 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
5174 hcd
->rsrc_start
= res
->start
;
5175 hcd
->rsrc_len
= resource_size(res
);
5177 ((struct wrapper_priv_data
*)&hcd
->hcd_priv
)->hsotg
= hsotg
;
5181 * Disable the global interrupt until all the interrupt handlers are
5184 dwc2_disable_global_interrupts(hsotg
);
5186 /* Initialize the DWC_otg core, and select the Phy type */
5187 retval
= dwc2_core_init(hsotg
, true);
5191 /* Create new workqueue and init work */
5193 hsotg
->wq_otg
= alloc_ordered_workqueue("dwc2", 0);
5194 if (!hsotg
->wq_otg
) {
5195 dev_err(hsotg
->dev
, "Failed to create workqueue\n");
5198 INIT_WORK(&hsotg
->wf_otg
, dwc2_conn_id_status_change
);
5200 timer_setup(&hsotg
->wkp_timer
, dwc2_wakeup_detected
, 0);
5202 /* Initialize the non-periodic schedule */
5203 INIT_LIST_HEAD(&hsotg
->non_periodic_sched_inactive
);
5204 INIT_LIST_HEAD(&hsotg
->non_periodic_sched_waiting
);
5205 INIT_LIST_HEAD(&hsotg
->non_periodic_sched_active
);
5207 /* Initialize the periodic schedule */
5208 INIT_LIST_HEAD(&hsotg
->periodic_sched_inactive
);
5209 INIT_LIST_HEAD(&hsotg
->periodic_sched_ready
);
5210 INIT_LIST_HEAD(&hsotg
->periodic_sched_assigned
);
5211 INIT_LIST_HEAD(&hsotg
->periodic_sched_queued
);
5213 INIT_LIST_HEAD(&hsotg
->split_order
);
5216 * Create a host channel descriptor for each host channel implemented
5217 * in the controller. Initialize the channel descriptor array.
5219 INIT_LIST_HEAD(&hsotg
->free_hc_list
);
5220 num_channels
= hsotg
->params
.host_channels
;
5221 memset(&hsotg
->hc_ptr_array
[0], 0, sizeof(hsotg
->hc_ptr_array
));
5223 for (i
= 0; i
< num_channels
; i
++) {
5224 channel
= kzalloc(sizeof(*channel
), GFP_KERNEL
);
5227 channel
->hc_num
= i
;
5228 INIT_LIST_HEAD(&channel
->split_order_list_entry
);
5229 hsotg
->hc_ptr_array
[i
] = channel
;
5232 /* Initialize work */
5233 INIT_DELAYED_WORK(&hsotg
->start_work
, dwc2_hcd_start_func
);
5234 INIT_DELAYED_WORK(&hsotg
->reset_work
, dwc2_hcd_reset_func
);
5235 INIT_WORK(&hsotg
->phy_reset_work
, dwc2_hcd_phy_reset_func
);
5238 * Allocate space for storing data on status transactions. Normally no
5239 * data is sent, but this space acts as a bit bucket. This must be
5240 * done after usb_add_hcd since that function allocates the DMA buffer
5243 if (hsotg
->params
.host_dma
)
5244 hsotg
->status_buf
= dma_alloc_coherent(hsotg
->dev
,
5245 DWC2_HCD_STATUS_BUF_SIZE
,
5246 &hsotg
->status_buf_dma
, GFP_KERNEL
);
5248 hsotg
->status_buf
= kzalloc(DWC2_HCD_STATUS_BUF_SIZE
,
5251 if (!hsotg
->status_buf
)
5255 * Create kmem caches to handle descriptor buffers in descriptor
5257 * Alignment must be set to 512 bytes.
5259 if (hsotg
->params
.dma_desc_enable
||
5260 hsotg
->params
.dma_desc_fs_enable
) {
5261 hsotg
->desc_gen_cache
= kmem_cache_create("dwc2-gen-desc",
5262 sizeof(struct dwc2_dma_desc
) *
5263 MAX_DMA_DESC_NUM_GENERIC
, 512, SLAB_CACHE_DMA
,
5265 if (!hsotg
->desc_gen_cache
) {
5267 "unable to create dwc2 generic desc cache\n");
5270 * Disable descriptor dma mode since it will not be
5273 hsotg
->params
.dma_desc_enable
= false;
5274 hsotg
->params
.dma_desc_fs_enable
= false;
5277 hsotg
->desc_hsisoc_cache
= kmem_cache_create("dwc2-hsisoc-desc",
5278 sizeof(struct dwc2_dma_desc
) *
5279 MAX_DMA_DESC_NUM_HS_ISOC
, 512, 0, NULL
);
5280 if (!hsotg
->desc_hsisoc_cache
) {
5282 "unable to create dwc2 hs isoc desc cache\n");
5284 kmem_cache_destroy(hsotg
->desc_gen_cache
);
5287 * Disable descriptor dma mode since it will not be
5290 hsotg
->params
.dma_desc_enable
= false;
5291 hsotg
->params
.dma_desc_fs_enable
= false;
5295 if (hsotg
->params
.host_dma
) {
5297 * Create kmem caches to handle non-aligned buffer
5298 * in Buffer DMA mode.
5300 hsotg
->unaligned_cache
= kmem_cache_create("dwc2-unaligned-dma",
5301 DWC2_KMEM_UNALIGNED_BUF_SIZE
, 4,
5302 SLAB_CACHE_DMA
, NULL
);
5303 if (!hsotg
->unaligned_cache
)
5305 "unable to create dwc2 unaligned cache\n");
5308 hsotg
->otg_port
= 1;
5309 hsotg
->frame_list
= NULL
;
5310 hsotg
->frame_list_dma
= 0;
5311 hsotg
->periodic_qh_count
= 0;
5313 /* Initiate lx_state to L3 disconnected state */
5314 hsotg
->lx_state
= DWC2_L3
;
5316 hcd
->self
.otg_port
= hsotg
->otg_port
;
5318 /* Don't support SG list at this point */
5319 hcd
->self
.sg_tablesize
= 0;
5321 hcd
->tpl_support
= of_usb_host_tpl_support(hsotg
->dev
->of_node
);
5323 if (!IS_ERR_OR_NULL(hsotg
->uphy
))
5324 otg_set_host(hsotg
->uphy
->otg
, &hcd
->self
);
5327 * Finish generic HCD initialization and start the HCD. This function
5328 * allocates the DMA buffer pool, registers the USB bus, requests the
5329 * IRQ line, and calls hcd_start method.
5331 retval
= usb_add_hcd(hcd
, hsotg
->irq
, IRQF_SHARED
);
5335 device_wakeup_enable(hcd
->self
.controller
);
5337 dwc2_hcd_dump_state(hsotg
);
5339 dwc2_enable_global_interrupts(hsotg
);
5344 kmem_cache_destroy(hsotg
->unaligned_cache
);
5345 kmem_cache_destroy(hsotg
->desc_hsisoc_cache
);
5346 kmem_cache_destroy(hsotg
->desc_gen_cache
);
5348 dwc2_hcd_release(hsotg
);
5353 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5354 kfree(hsotg
->last_frame_num_array
);
5355 kfree(hsotg
->frame_num_array
);
5358 dev_err(hsotg
->dev
, "%s() FAILED, returning %d\n", __func__
, retval
);
5364 * Frees memory and resources associated with the HCD and deregisters the bus.
5366 void dwc2_hcd_remove(struct dwc2_hsotg
*hsotg
)
5368 struct usb_hcd
*hcd
;
5370 dev_dbg(hsotg
->dev
, "DWC OTG HCD REMOVE\n");
5372 hcd
= dwc2_hsotg_to_hcd(hsotg
);
5373 dev_dbg(hsotg
->dev
, "hsotg->hcd = %p\n", hcd
);
5376 dev_dbg(hsotg
->dev
, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
5381 if (!IS_ERR_OR_NULL(hsotg
->uphy
))
5382 otg_set_host(hsotg
->uphy
->otg
, NULL
);
5384 usb_remove_hcd(hcd
);
5387 kmem_cache_destroy(hsotg
->unaligned_cache
);
5388 kmem_cache_destroy(hsotg
->desc_hsisoc_cache
);
5389 kmem_cache_destroy(hsotg
->desc_gen_cache
);
5391 dwc2_hcd_release(hsotg
);
5394 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5395 kfree(hsotg
->last_frame_num_array
);
5396 kfree(hsotg
->frame_num_array
);
5401 * dwc2_backup_host_registers() - Backup controller host registers.
5402 * When suspending usb bus, registers needs to be backuped
5403 * if controller power is disabled once suspended.
5405 * @hsotg: Programming view of the DWC_otg controller
5407 int dwc2_backup_host_registers(struct dwc2_hsotg
*hsotg
)
5409 struct dwc2_hregs_backup
*hr
;
5412 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
5414 /* Backup Host regs */
5415 hr
= &hsotg
->hr_backup
;
5416 hr
->hcfg
= dwc2_readl(hsotg
, HCFG
);
5417 hr
->hflbaddr
= dwc2_readl(hsotg
, HFLBADDR
);
5418 hr
->haintmsk
= dwc2_readl(hsotg
, HAINTMSK
);
5419 for (i
= 0; i
< hsotg
->params
.host_channels
; ++i
) {
5420 hr
->hcchar
[i
] = dwc2_readl(hsotg
, HCCHAR(i
));
5421 hr
->hcsplt
[i
] = dwc2_readl(hsotg
, HCSPLT(i
));
5422 hr
->hcintmsk
[i
] = dwc2_readl(hsotg
, HCINTMSK(i
));
5423 hr
->hctsiz
[i
] = dwc2_readl(hsotg
, HCTSIZ(i
));
5424 hr
->hcidma
[i
] = dwc2_readl(hsotg
, HCDMA(i
));
5425 hr
->hcidmab
[i
] = dwc2_readl(hsotg
, HCDMAB(i
));
5428 hr
->hprt0
= dwc2_read_hprt0(hsotg
);
5429 hr
->hfir
= dwc2_readl(hsotg
, HFIR
);
5430 hr
->hptxfsiz
= dwc2_readl(hsotg
, HPTXFSIZ
);
5437 * dwc2_restore_host_registers() - Restore controller host registers.
5438 * When resuming usb bus, device registers needs to be restored
5439 * if controller power were disabled.
5441 * @hsotg: Programming view of the DWC_otg controller
5443 int dwc2_restore_host_registers(struct dwc2_hsotg
*hsotg
)
5445 struct dwc2_hregs_backup
*hr
;
5448 dev_dbg(hsotg
->dev
, "%s\n", __func__
);
5450 /* Restore host regs */
5451 hr
= &hsotg
->hr_backup
;
5453 dev_err(hsotg
->dev
, "%s: no host registers to restore\n",
5459 dwc2_writel(hsotg
, hr
->hcfg
, HCFG
);
5460 dwc2_writel(hsotg
, hr
->hflbaddr
, HFLBADDR
);
5461 dwc2_writel(hsotg
, hr
->haintmsk
, HAINTMSK
);
5463 for (i
= 0; i
< hsotg
->params
.host_channels
; ++i
) {
5464 dwc2_writel(hsotg
, hr
->hcchar
[i
], HCCHAR(i
));
5465 dwc2_writel(hsotg
, hr
->hcsplt
[i
], HCSPLT(i
));
5466 dwc2_writel(hsotg
, hr
->hcintmsk
[i
], HCINTMSK(i
));
5467 dwc2_writel(hsotg
, hr
->hctsiz
[i
], HCTSIZ(i
));
5468 dwc2_writel(hsotg
, hr
->hcidma
[i
], HCDMA(i
));
5469 dwc2_writel(hsotg
, hr
->hcidmab
[i
], HCDMAB(i
));
5472 dwc2_writel(hsotg
, hr
->hprt0
, HPRT0
);
5473 dwc2_writel(hsotg
, hr
->hfir
, HFIR
);
5474 dwc2_writel(hsotg
, hr
->hptxfsiz
, HPTXFSIZ
);
5475 hsotg
->frame_number
= 0;
5481 * dwc2_host_enter_hibernation() - Put controller in Hibernation.
5483 * @hsotg: Programming view of the DWC_otg controller
5485 int dwc2_host_enter_hibernation(struct dwc2_hsotg
*hsotg
)
5487 unsigned long flags
;
5494 dev_dbg(hsotg
->dev
, "Preparing host for hibernation\n");
5495 ret
= dwc2_backup_global_registers(hsotg
);
5497 dev_err(hsotg
->dev
, "%s: failed to backup global registers\n",
5501 ret
= dwc2_backup_host_registers(hsotg
);
5503 dev_err(hsotg
->dev
, "%s: failed to backup host registers\n",
5508 /* Enter USB Suspend Mode */
5509 hprt0
= dwc2_readl(hsotg
, HPRT0
);
5510 hprt0
|= HPRT0_SUSP
;
5511 hprt0
&= ~HPRT0_ENA
;
5512 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5514 /* Wait for the HPRT0.PrtSusp register field to be set */
5515 if (dwc2_hsotg_wait_bit_set(hsotg
, HPRT0
, HPRT0_SUSP
, 5000))
5516 dev_warn(hsotg
->dev
, "Suspend wasn't generated\n");
5519 * We need to disable interrupts to prevent servicing of any IRQ
5520 * during going to hibernation
5522 spin_lock_irqsave(&hsotg
->lock
, flags
);
5523 hsotg
->lx_state
= DWC2_L2
;
5525 gusbcfg
= dwc2_readl(hsotg
, GUSBCFG
);
5526 if (gusbcfg
& GUSBCFG_ULPI_UTMI_SEL
) {
5527 /* ULPI interface */
5529 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5530 gpwrdn
|= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY
;
5531 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5533 /* Suspend the Phy Clock */
5534 pcgcctl
= dwc2_readl(hsotg
, PCGCTL
);
5535 pcgcctl
|= PCGCTL_STOPPCLK
;
5536 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5539 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5540 gpwrdn
|= GPWRDN_PMUACTV
;
5541 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5544 /* UTMI+ Interface */
5545 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5546 gpwrdn
|= GPWRDN_PMUACTV
;
5547 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5550 pcgcctl
= dwc2_readl(hsotg
, PCGCTL
);
5551 pcgcctl
|= PCGCTL_STOPPCLK
;
5552 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5556 /* Enable interrupts from wake up logic */
5557 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5558 gpwrdn
|= GPWRDN_PMUINTSEL
;
5559 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5562 /* Unmask host mode interrupts in GPWRDN */
5563 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5564 gpwrdn
|= GPWRDN_DISCONN_DET_MSK
;
5565 gpwrdn
|= GPWRDN_LNSTSCHG_MSK
;
5566 gpwrdn
|= GPWRDN_STS_CHGINT_MSK
;
5567 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5570 /* Enable Power Down Clamp */
5571 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5572 gpwrdn
|= GPWRDN_PWRDNCLMP
;
5573 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5576 /* Switch off VDD */
5577 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5578 gpwrdn
|= GPWRDN_PWRDNSWTCH
;
5579 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5581 hsotg
->hibernated
= 1;
5582 hsotg
->bus_suspended
= 1;
5583 dev_dbg(hsotg
->dev
, "Host hibernation completed\n");
5584 spin_unlock_irqrestore(&hsotg
->lock
, flags
);
5589 * dwc2_host_exit_hibernation()
5591 * @hsotg: Programming view of the DWC_otg controller
5592 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
5593 * @param reset: indicates whether resume is initiated by Reset.
5595 * Return: non-zero if failed to enter to hibernation.
5597 * This function is for exiting from Host mode hibernation by
5598 * Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
5600 int dwc2_host_exit_hibernation(struct dwc2_hsotg
*hsotg
, int rem_wakeup
,
5606 struct dwc2_gregs_backup
*gr
;
5607 struct dwc2_hregs_backup
*hr
;
5609 gr
= &hsotg
->gr_backup
;
5610 hr
= &hsotg
->hr_backup
;
5613 "%s: called with rem_wakeup = %d reset = %d\n",
5614 __func__
, rem_wakeup
, reset
);
5616 dwc2_hib_restore_common(hsotg
, rem_wakeup
, 1);
5617 hsotg
->hibernated
= 0;
5620 * This step is not described in functional spec but if not wait for
5621 * this delay, mismatch interrupts occurred because just after restore
5622 * core is in Device mode(gintsts.curmode == 0)
5626 /* Clear all pending interupts */
5627 dwc2_writel(hsotg
, 0xffffffff, GINTSTS
);
5629 /* De-assert Restore */
5630 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5631 gpwrdn
&= ~GPWRDN_RESTORE
;
5632 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5635 /* Restore GUSBCFG, HCFG */
5636 dwc2_writel(hsotg
, gr
->gusbcfg
, GUSBCFG
);
5637 dwc2_writel(hsotg
, hr
->hcfg
, HCFG
);
5639 /* Reset ULPI latch */
5640 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5641 gpwrdn
&= ~GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY
;
5642 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5644 /* De-assert Wakeup Logic */
5645 if (!(rem_wakeup
&& hsotg
->hw_params
.snpsid
>= DWC2_CORE_REV_4_30a
)) {
5646 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5647 gpwrdn
&= ~GPWRDN_PMUACTV
;
5648 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5654 hprt0
&= ~HPRT0_ENA
;
5655 hprt0
&= ~HPRT0_SUSP
;
5656 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5660 hprt0
&= ~HPRT0_ENA
;
5661 hprt0
&= ~HPRT0_SUSP
;
5665 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5667 /* Wait for Resume time and then program HPRT again */
5669 hprt0
&= ~HPRT0_RST
;
5670 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5673 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5675 /* De-assert Wakeup Logic */
5676 if ((rem_wakeup
&& hsotg
->hw_params
.snpsid
>= DWC2_CORE_REV_4_30a
)) {
5677 gpwrdn
= dwc2_readl(hsotg
, GPWRDN
);
5678 gpwrdn
&= ~GPWRDN_PMUACTV
;
5679 dwc2_writel(hsotg
, gpwrdn
, GPWRDN
);
5682 /* Wait for Resume time and then program HPRT again */
5684 hprt0
&= ~HPRT0_RES
;
5685 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5687 /* Clear all interrupt status */
5688 hprt0
= dwc2_readl(hsotg
, HPRT0
);
5689 hprt0
|= HPRT0_CONNDET
;
5690 hprt0
|= HPRT0_ENACHG
;
5691 hprt0
&= ~HPRT0_ENA
;
5692 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5694 hprt0
= dwc2_readl(hsotg
, HPRT0
);
5696 /* Clear all pending interupts */
5697 dwc2_writel(hsotg
, 0xffffffff, GINTSTS
);
5699 /* Restore global registers */
5700 ret
= dwc2_restore_global_registers(hsotg
);
5702 dev_err(hsotg
->dev
, "%s: failed to restore registers\n",
5707 /* Restore host registers */
5708 ret
= dwc2_restore_host_registers(hsotg
);
5710 dev_err(hsotg
->dev
, "%s: failed to restore host registers\n",
5716 dwc2_hcd_rem_wakeup(hsotg
);
5718 * Change "port_connect_status_change" flag to re-enumerate,
5719 * because after exit from hibernation port connection status
5722 hsotg
->flags
.b
.port_connect_status_change
= 1;
5725 hsotg
->hibernated
= 0;
5726 hsotg
->bus_suspended
= 0;
5727 hsotg
->lx_state
= DWC2_L0
;
5728 dev_dbg(hsotg
->dev
, "Host hibernation restore complete\n");
5732 bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg
*dwc2
)
5734 struct usb_device
*root_hub
= dwc2_hsotg_to_hcd(dwc2
)->self
.root_hub
;
5736 /* If the controller isn't allowed to wakeup then we can power off. */
5737 if (!device_may_wakeup(dwc2
->dev
))
5741 * We don't want to power off the PHY if something under the
5742 * root hub has wakeup enabled.
5744 if (usb_wakeup_enabled_descendants(root_hub
))
5747 /* No reason to keep the PHY powered, so allow poweroff */
5752 * dwc2_host_enter_partial_power_down() - Put controller in partial
5755 * @hsotg: Programming view of the DWC_otg controller
5757 * Return: non-zero if failed to enter host partial power down.
5759 * This function is for entering Host mode partial power down.
5761 int dwc2_host_enter_partial_power_down(struct dwc2_hsotg
*hsotg
)
5767 dev_dbg(hsotg
->dev
, "Entering host partial power down started.\n");
5769 /* Put this port in suspend mode. */
5770 hprt0
= dwc2_read_hprt0(hsotg
);
5771 hprt0
|= HPRT0_SUSP
;
5772 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5775 /* Wait for the HPRT0.PrtSusp register field to be set */
5776 if (dwc2_hsotg_wait_bit_set(hsotg
, HPRT0
, HPRT0_SUSP
, 3000))
5777 dev_warn(hsotg
->dev
, "Suspend wasn't generated\n");
5779 /* Backup all registers */
5780 ret
= dwc2_backup_global_registers(hsotg
);
5782 dev_err(hsotg
->dev
, "%s: failed to backup global registers\n",
5787 ret
= dwc2_backup_host_registers(hsotg
);
5789 dev_err(hsotg
->dev
, "%s: failed to backup host registers\n",
5795 * Clear any pending interrupts since dwc2 will not be able to
5796 * clear them after entering partial_power_down.
5798 dwc2_writel(hsotg
, 0xffffffff, GINTSTS
);
5800 /* Put the controller in low power state */
5801 pcgcctl
= dwc2_readl(hsotg
, PCGCTL
);
5803 pcgcctl
|= PCGCTL_PWRCLMP
;
5804 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5807 pcgcctl
|= PCGCTL_RSTPDWNMODULE
;
5808 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5811 pcgcctl
|= PCGCTL_STOPPCLK
;
5812 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5814 /* Set in_ppd flag to 1 as here core enters suspend. */
5816 hsotg
->lx_state
= DWC2_L2
;
5817 hsotg
->bus_suspended
= true;
5819 dev_dbg(hsotg
->dev
, "Entering host partial power down completed.\n");
5825 * dwc2_host_exit_partial_power_down() - Exit controller from host partial
5828 * @hsotg: Programming view of the DWC_otg controller
5829 * @rem_wakeup: indicates whether resume is initiated by Reset.
5830 * @restore: indicates whether need to restore the registers or not.
5832 * Return: non-zero if failed to exit host partial power down.
5834 * This function is for exiting from Host mode partial power down.
5836 int dwc2_host_exit_partial_power_down(struct dwc2_hsotg
*hsotg
,
5837 int rem_wakeup
, bool restore
)
5843 dev_dbg(hsotg
->dev
, "Exiting host partial power down started.\n");
5845 pcgcctl
= dwc2_readl(hsotg
, PCGCTL
);
5846 pcgcctl
&= ~PCGCTL_STOPPCLK
;
5847 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5850 pcgcctl
= dwc2_readl(hsotg
, PCGCTL
);
5851 pcgcctl
&= ~PCGCTL_PWRCLMP
;
5852 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5855 pcgcctl
= dwc2_readl(hsotg
, PCGCTL
);
5856 pcgcctl
&= ~PCGCTL_RSTPDWNMODULE
;
5857 dwc2_writel(hsotg
, pcgcctl
, PCGCTL
);
5861 ret
= dwc2_restore_global_registers(hsotg
);
5863 dev_err(hsotg
->dev
, "%s: failed to restore registers\n",
5868 ret
= dwc2_restore_host_registers(hsotg
);
5870 dev_err(hsotg
->dev
, "%s: failed to restore host registers\n",
5876 /* Drive resume signaling and exit suspend mode on the port. */
5877 hprt0
= dwc2_read_hprt0(hsotg
);
5879 hprt0
&= ~HPRT0_SUSP
;
5880 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5884 /* Stop driveing resume signaling on the port. */
5885 hprt0
= dwc2_read_hprt0(hsotg
);
5886 hprt0
&= ~HPRT0_RES
;
5887 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5889 hsotg
->bus_suspended
= false;
5891 /* Turn on the port power bit. */
5892 hprt0
= dwc2_read_hprt0(hsotg
);
5894 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5897 dwc2_hcd_connect(hsotg
);
5899 mod_timer(&hsotg
->wkp_timer
,
5900 jiffies
+ msecs_to_jiffies(71));
5903 /* Set lx_state to and in_ppd to 0 as here core exits from suspend. */
5905 hsotg
->lx_state
= DWC2_L0
;
5907 dev_dbg(hsotg
->dev
, "Exiting host partial power down completed.\n");
5912 * dwc2_host_enter_clock_gating() - Put controller in clock gating.
5914 * @hsotg: Programming view of the DWC_otg controller
5916 * This function is for entering Host mode clock gating.
5918 void dwc2_host_enter_clock_gating(struct dwc2_hsotg
*hsotg
)
5923 dev_dbg(hsotg
->dev
, "Entering host clock gating.\n");
5925 /* Put this port in suspend mode. */
5926 hprt0
= dwc2_read_hprt0(hsotg
);
5927 hprt0
|= HPRT0_SUSP
;
5928 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5930 /* Set the Phy Clock bit as suspend is received. */
5931 pcgctl
= dwc2_readl(hsotg
, PCGCTL
);
5932 pcgctl
|= PCGCTL_STOPPCLK
;
5933 dwc2_writel(hsotg
, pcgctl
, PCGCTL
);
5936 /* Set the Gate hclk as suspend is received. */
5937 pcgctl
= dwc2_readl(hsotg
, PCGCTL
);
5938 pcgctl
|= PCGCTL_GATEHCLK
;
5939 dwc2_writel(hsotg
, pcgctl
, PCGCTL
);
5942 hsotg
->bus_suspended
= true;
5943 hsotg
->lx_state
= DWC2_L2
;
5947 * dwc2_host_exit_clock_gating() - Exit controller from clock gating.
5949 * @hsotg: Programming view of the DWC_otg controller
5950 * @rem_wakeup: indicates whether resume is initiated by remote wakeup
5952 * This function is for exiting Host mode clock gating.
5954 void dwc2_host_exit_clock_gating(struct dwc2_hsotg
*hsotg
, int rem_wakeup
)
5959 dev_dbg(hsotg
->dev
, "Exiting host clock gating.\n");
5961 /* Clear the Gate hclk. */
5962 pcgctl
= dwc2_readl(hsotg
, PCGCTL
);
5963 pcgctl
&= ~PCGCTL_GATEHCLK
;
5964 dwc2_writel(hsotg
, pcgctl
, PCGCTL
);
5967 /* Phy Clock bit. */
5968 pcgctl
= dwc2_readl(hsotg
, PCGCTL
);
5969 pcgctl
&= ~PCGCTL_STOPPCLK
;
5970 dwc2_writel(hsotg
, pcgctl
, PCGCTL
);
5973 /* Drive resume signaling and exit suspend mode on the port. */
5974 hprt0
= dwc2_read_hprt0(hsotg
);
5976 hprt0
&= ~HPRT0_SUSP
;
5977 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5981 /* In case of port resume need to wait for 40 ms */
5982 msleep(USB_RESUME_TIMEOUT
);
5984 /* Stop driveing resume signaling on the port. */
5985 hprt0
= dwc2_read_hprt0(hsotg
);
5986 hprt0
&= ~HPRT0_RES
;
5987 dwc2_writel(hsotg
, hprt0
, HPRT0
);
5989 hsotg
->bus_suspended
= false;
5990 hsotg
->lx_state
= DWC2_L0
;
5992 mod_timer(&hsotg
->wkp_timer
,
5993 jiffies
+ msecs_to_jiffies(71));