2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
5 * Copyright 2008 Openmoko, Inc.
6 * Copyright 2008 Simtec Electronics
7 * Ben Dooks <ben@simtec.co.uk>
8 * http://armlinux.simtec.co.uk/
10 * S3C USB2.0 High-speed / OtG driver
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/spinlock.h>
20 #include <linux/interrupt.h>
21 #include <linux/platform_device.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/debugfs.h>
24 #include <linux/mutex.h>
25 #include <linux/seq_file.h>
26 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/clk.h>
30 #include <linux/regulator/consumer.h>
31 #include <linux/of_platform.h>
32 #include <linux/phy/phy.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 #include <linux/usb/phy.h>
37 #include <linux/platform_data/s3c-hsotg.h>
38 #include <linux/uaccess.h>
43 /* conversion functions */
44 static inline struct s3c_hsotg_req
*our_req(struct usb_request
*req
)
46 return container_of(req
, struct s3c_hsotg_req
, req
);
49 static inline struct s3c_hsotg_ep
*our_ep(struct usb_ep
*ep
)
51 return container_of(ep
, struct s3c_hsotg_ep
, ep
);
54 static inline struct dwc2_hsotg
*to_hsotg(struct usb_gadget
*gadget
)
56 return container_of(gadget
, struct dwc2_hsotg
, gadget
);
59 static inline void __orr32(void __iomem
*ptr
, u32 val
)
61 writel(readl(ptr
) | val
, ptr
);
64 static inline void __bic32(void __iomem
*ptr
, u32 val
)
66 writel(readl(ptr
) & ~val
, ptr
);
69 static inline struct s3c_hsotg_ep
*index_to_ep(struct dwc2_hsotg
*hsotg
,
70 u32 ep_index
, u32 dir_in
)
73 return hsotg
->eps_in
[ep_index
];
75 return hsotg
->eps_out
[ep_index
];
78 /* forward declaration of functions */
79 static void s3c_hsotg_dump(struct dwc2_hsotg
*hsotg
);
82 * using_dma - return the DMA status of the driver.
83 * @hsotg: The driver state.
85 * Return true if we're using DMA.
87 * Currently, we have the DMA support code worked into everywhere
88 * that needs it, but the AMBA DMA implementation in the hardware can
89 * only DMA from 32bit aligned addresses. This means that gadgets such
90 * as the CDC Ethernet cannot work as they often pass packets which are
93 * Unfortunately the choice to use DMA or not is global to the controller
94 * and seems to be only settable when the controller is being put through
95 * a core reset. This means we either need to fix the gadgets to take
96 * account of DMA alignment, or add bounce buffers (yuerk).
98 * g_using_dma is set depending on dts flag.
100 static inline bool using_dma(struct dwc2_hsotg
*hsotg
)
102 return hsotg
->g_using_dma
;
106 * s3c_hsotg_en_gsint - enable one or more of the general interrupt
107 * @hsotg: The device state
108 * @ints: A bitmask of the interrupts to enable
110 static void s3c_hsotg_en_gsint(struct dwc2_hsotg
*hsotg
, u32 ints
)
112 u32 gsintmsk
= readl(hsotg
->regs
+ GINTMSK
);
115 new_gsintmsk
= gsintmsk
| ints
;
117 if (new_gsintmsk
!= gsintmsk
) {
118 dev_dbg(hsotg
->dev
, "gsintmsk now 0x%08x\n", new_gsintmsk
);
119 writel(new_gsintmsk
, hsotg
->regs
+ GINTMSK
);
124 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
125 * @hsotg: The device state
126 * @ints: A bitmask of the interrupts to enable
128 static void s3c_hsotg_disable_gsint(struct dwc2_hsotg
*hsotg
, u32 ints
)
130 u32 gsintmsk
= readl(hsotg
->regs
+ GINTMSK
);
133 new_gsintmsk
= gsintmsk
& ~ints
;
135 if (new_gsintmsk
!= gsintmsk
)
136 writel(new_gsintmsk
, hsotg
->regs
+ GINTMSK
);
140 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
141 * @hsotg: The device state
142 * @ep: The endpoint index
143 * @dir_in: True if direction is in.
144 * @en: The enable value, true to enable
146 * Set or clear the mask for an individual endpoint's interrupt
149 static void s3c_hsotg_ctrl_epint(struct dwc2_hsotg
*hsotg
,
150 unsigned int ep
, unsigned int dir_in
,
160 local_irq_save(flags
);
161 daint
= readl(hsotg
->regs
+ DAINTMSK
);
166 writel(daint
, hsotg
->regs
+ DAINTMSK
);
167 local_irq_restore(flags
);
171 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
172 * @hsotg: The device instance.
174 static void s3c_hsotg_init_fifo(struct dwc2_hsotg
*hsotg
)
181 /* Reset fifo map if not correctly cleared during previous session */
182 WARN_ON(hsotg
->fifo_map
);
185 /* set RX/NPTX FIFO sizes */
186 writel(hsotg
->g_rx_fifo_sz
, hsotg
->regs
+ GRXFSIZ
);
187 writel((hsotg
->g_rx_fifo_sz
<< FIFOSIZE_STARTADDR_SHIFT
) |
188 (hsotg
->g_np_g_tx_fifo_sz
<< FIFOSIZE_DEPTH_SHIFT
),
189 hsotg
->regs
+ GNPTXFSIZ
);
192 * arange all the rest of the TX FIFOs, as some versions of this
193 * block have overlapping default addresses. This also ensures
194 * that if the settings have been changed, then they are set to
198 /* start at the end of the GNPTXFSIZ, rounded up */
199 addr
= hsotg
->g_rx_fifo_sz
+ hsotg
->g_np_g_tx_fifo_sz
;
202 * Configure fifos sizes from provided configuration and assign
203 * them to endpoints dynamically according to maxpacket size value of
206 for (ep
= 1; ep
< MAX_EPS_CHANNELS
; ep
++) {
207 if (!hsotg
->g_tx_fifo_sz
[ep
])
210 val
|= hsotg
->g_tx_fifo_sz
[ep
] << FIFOSIZE_DEPTH_SHIFT
;
211 WARN_ONCE(addr
+ hsotg
->g_tx_fifo_sz
[ep
] > hsotg
->fifo_mem
,
212 "insufficient fifo memory");
213 addr
+= hsotg
->g_tx_fifo_sz
[ep
];
215 writel(val
, hsotg
->regs
+ DPTXFSIZN(ep
));
219 * according to p428 of the design guide, we need to ensure that
220 * all fifos are flushed before continuing
223 writel(GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH
|
224 GRSTCTL_RXFFLSH
, hsotg
->regs
+ GRSTCTL
);
226 /* wait until the fifos are both flushed */
229 val
= readl(hsotg
->regs
+ GRSTCTL
);
231 if ((val
& (GRSTCTL_TXFFLSH
| GRSTCTL_RXFFLSH
)) == 0)
234 if (--timeout
== 0) {
236 "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
244 dev_dbg(hsotg
->dev
, "FIFOs reset, timeout at %d\n", timeout
);
248 * @ep: USB endpoint to allocate request for.
249 * @flags: Allocation flags
251 * Allocate a new USB request structure appropriate for the specified endpoint
253 static struct usb_request
*s3c_hsotg_ep_alloc_request(struct usb_ep
*ep
,
256 struct s3c_hsotg_req
*req
;
258 req
= kzalloc(sizeof(struct s3c_hsotg_req
), flags
);
262 INIT_LIST_HEAD(&req
->queue
);
268 * is_ep_periodic - return true if the endpoint is in periodic mode.
269 * @hs_ep: The endpoint to query.
271 * Returns true if the endpoint is in periodic mode, meaning it is being
272 * used for an Interrupt or ISO transfer.
274 static inline int is_ep_periodic(struct s3c_hsotg_ep
*hs_ep
)
276 return hs_ep
->periodic
;
280 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
281 * @hsotg: The device state.
282 * @hs_ep: The endpoint for the request
283 * @hs_req: The request being processed.
285 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
286 * of a request to ensure the buffer is ready for access by the caller.
288 static void s3c_hsotg_unmap_dma(struct dwc2_hsotg
*hsotg
,
289 struct s3c_hsotg_ep
*hs_ep
,
290 struct s3c_hsotg_req
*hs_req
)
292 struct usb_request
*req
= &hs_req
->req
;
294 /* ignore this if we're not moving any data */
295 if (hs_req
->req
.length
== 0)
298 usb_gadget_unmap_request(&hsotg
->gadget
, req
, hs_ep
->dir_in
);
302 * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
303 * @hsotg: The controller state.
304 * @hs_ep: The endpoint we're going to write for.
305 * @hs_req: The request to write data for.
307 * This is called when the TxFIFO has some space in it to hold a new
308 * transmission and we have something to give it. The actual setup of
309 * the data size is done elsewhere, so all we have to do is to actually
312 * The return value is zero if there is more space (or nothing was done)
313 * otherwise -ENOSPC is returned if the FIFO space was used up.
315 * This routine is only needed for PIO
317 static int s3c_hsotg_write_fifo(struct dwc2_hsotg
*hsotg
,
318 struct s3c_hsotg_ep
*hs_ep
,
319 struct s3c_hsotg_req
*hs_req
)
321 bool periodic
= is_ep_periodic(hs_ep
);
322 u32 gnptxsts
= readl(hsotg
->regs
+ GNPTXSTS
);
323 int buf_pos
= hs_req
->req
.actual
;
324 int to_write
= hs_ep
->size_loaded
;
330 to_write
-= (buf_pos
- hs_ep
->last_load
);
332 /* if there's nothing to write, get out early */
336 if (periodic
&& !hsotg
->dedicated_fifos
) {
337 u32 epsize
= readl(hsotg
->regs
+ DIEPTSIZ(hs_ep
->index
));
342 * work out how much data was loaded so we can calculate
343 * how much data is left in the fifo.
346 size_left
= DXEPTSIZ_XFERSIZE_GET(epsize
);
349 * if shared fifo, we cannot write anything until the
350 * previous data has been completely sent.
352 if (hs_ep
->fifo_load
!= 0) {
353 s3c_hsotg_en_gsint(hsotg
, GINTSTS_PTXFEMP
);
357 dev_dbg(hsotg
->dev
, "%s: left=%d, load=%d, fifo=%d, size %d\n",
359 hs_ep
->size_loaded
, hs_ep
->fifo_load
, hs_ep
->fifo_size
);
361 /* how much of the data has moved */
362 size_done
= hs_ep
->size_loaded
- size_left
;
364 /* how much data is left in the fifo */
365 can_write
= hs_ep
->fifo_load
- size_done
;
366 dev_dbg(hsotg
->dev
, "%s: => can_write1=%d\n",
367 __func__
, can_write
);
369 can_write
= hs_ep
->fifo_size
- can_write
;
370 dev_dbg(hsotg
->dev
, "%s: => can_write2=%d\n",
371 __func__
, can_write
);
373 if (can_write
<= 0) {
374 s3c_hsotg_en_gsint(hsotg
, GINTSTS_PTXFEMP
);
377 } else if (hsotg
->dedicated_fifos
&& hs_ep
->index
!= 0) {
378 can_write
= readl(hsotg
->regs
+ DTXFSTS(hs_ep
->index
));
383 if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts
) == 0) {
385 "%s: no queue slots available (0x%08x)\n",
388 s3c_hsotg_en_gsint(hsotg
, GINTSTS_NPTXFEMP
);
392 can_write
= GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts
);
393 can_write
*= 4; /* fifo size is in 32bit quantities. */
396 max_transfer
= hs_ep
->ep
.maxpacket
* hs_ep
->mc
;
398 dev_dbg(hsotg
->dev
, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
399 __func__
, gnptxsts
, can_write
, to_write
, max_transfer
);
402 * limit to 512 bytes of data, it seems at least on the non-periodic
403 * FIFO, requests of >512 cause the endpoint to get stuck with a
404 * fragment of the end of the transfer in it.
406 if (can_write
> 512 && !periodic
)
410 * limit the write to one max-packet size worth of data, but allow
411 * the transfer to return that it did not run out of fifo space
414 if (to_write
> max_transfer
) {
415 to_write
= max_transfer
;
417 /* it's needed only when we do not use dedicated fifos */
418 if (!hsotg
->dedicated_fifos
)
419 s3c_hsotg_en_gsint(hsotg
,
420 periodic
? GINTSTS_PTXFEMP
:
424 /* see if we can write data */
426 if (to_write
> can_write
) {
427 to_write
= can_write
;
428 pkt_round
= to_write
% max_transfer
;
431 * Round the write down to an
432 * exact number of packets.
434 * Note, we do not currently check to see if we can ever
435 * write a full packet or not to the FIFO.
439 to_write
-= pkt_round
;
442 * enable correct FIFO interrupt to alert us when there
446 /* it's needed only when we do not use dedicated fifos */
447 if (!hsotg
->dedicated_fifos
)
448 s3c_hsotg_en_gsint(hsotg
,
449 periodic
? GINTSTS_PTXFEMP
:
453 dev_dbg(hsotg
->dev
, "write %d/%d, can_write %d, done %d\n",
454 to_write
, hs_req
->req
.length
, can_write
, buf_pos
);
459 hs_req
->req
.actual
= buf_pos
+ to_write
;
460 hs_ep
->total_data
+= to_write
;
463 hs_ep
->fifo_load
+= to_write
;
465 to_write
= DIV_ROUND_UP(to_write
, 4);
466 data
= hs_req
->req
.buf
+ buf_pos
;
468 iowrite32_rep(hsotg
->regs
+ EPFIFO(hs_ep
->index
), data
, to_write
);
470 return (to_write
>= can_write
) ? -ENOSPC
: 0;
474 * get_ep_limit - get the maximum data legnth for this endpoint
475 * @hs_ep: The endpoint
477 * Return the maximum data that can be queued in one go on a given endpoint
478 * so that transfers that are too long can be split.
480 static unsigned get_ep_limit(struct s3c_hsotg_ep
*hs_ep
)
482 int index
= hs_ep
->index
;
487 maxsize
= DXEPTSIZ_XFERSIZE_LIMIT
+ 1;
488 maxpkt
= DXEPTSIZ_PKTCNT_LIMIT
+ 1;
492 maxpkt
= DIEPTSIZ0_PKTCNT_LIMIT
+ 1;
497 /* we made the constant loading easier above by using +1 */
502 * constrain by packet count if maxpkts*pktsize is greater
503 * than the length register size.
506 if ((maxpkt
* hs_ep
->ep
.maxpacket
) < maxsize
)
507 maxsize
= maxpkt
* hs_ep
->ep
.maxpacket
;
513 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
514 * @hsotg: The controller state.
515 * @hs_ep: The endpoint to process a request for
516 * @hs_req: The request to start.
517 * @continuing: True if we are doing more for the current request.
519 * Start the given request running by setting the endpoint registers
520 * appropriately, and writing any data to the FIFOs.
522 static void s3c_hsotg_start_req(struct dwc2_hsotg
*hsotg
,
523 struct s3c_hsotg_ep
*hs_ep
,
524 struct s3c_hsotg_req
*hs_req
,
527 struct usb_request
*ureq
= &hs_req
->req
;
528 int index
= hs_ep
->index
;
529 int dir_in
= hs_ep
->dir_in
;
539 if (hs_ep
->req
&& !continuing
) {
540 dev_err(hsotg
->dev
, "%s: active request\n", __func__
);
543 } else if (hs_ep
->req
!= hs_req
&& continuing
) {
545 "%s: continue different req\n", __func__
);
551 epctrl_reg
= dir_in
? DIEPCTL(index
) : DOEPCTL(index
);
552 epsize_reg
= dir_in
? DIEPTSIZ(index
) : DOEPTSIZ(index
);
554 dev_dbg(hsotg
->dev
, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
555 __func__
, readl(hsotg
->regs
+ epctrl_reg
), index
,
556 hs_ep
->dir_in
? "in" : "out");
558 /* If endpoint is stalled, we will restart request later */
559 ctrl
= readl(hsotg
->regs
+ epctrl_reg
);
561 if (ctrl
& DXEPCTL_STALL
) {
562 dev_warn(hsotg
->dev
, "%s: ep%d is stalled\n", __func__
, index
);
566 length
= ureq
->length
- ureq
->actual
;
567 dev_dbg(hsotg
->dev
, "ureq->length:%d ureq->actual:%d\n",
568 ureq
->length
, ureq
->actual
);
570 maxreq
= get_ep_limit(hs_ep
);
571 if (length
> maxreq
) {
572 int round
= maxreq
% hs_ep
->ep
.maxpacket
;
574 dev_dbg(hsotg
->dev
, "%s: length %d, max-req %d, r %d\n",
575 __func__
, length
, maxreq
, round
);
577 /* round down to multiple of packets */
585 packets
= DIV_ROUND_UP(length
, hs_ep
->ep
.maxpacket
);
587 packets
= 1; /* send one packet if length is zero. */
589 if (hs_ep
->isochronous
&& length
> (hs_ep
->mc
* hs_ep
->ep
.maxpacket
)) {
590 dev_err(hsotg
->dev
, "req length > maxpacket*mc\n");
594 if (dir_in
&& index
!= 0)
595 if (hs_ep
->isochronous
)
596 epsize
= DXEPTSIZ_MC(packets
);
598 epsize
= DXEPTSIZ_MC(1);
603 * zero length packet should be programmed on its own and should not
604 * be counted in DIEPTSIZ.PktCnt with other packets.
606 if (dir_in
&& ureq
->zero
&& !continuing
) {
607 /* Test if zlp is actually required. */
608 if ((ureq
->length
>= hs_ep
->ep
.maxpacket
) &&
609 !(ureq
->length
% hs_ep
->ep
.maxpacket
))
613 epsize
|= DXEPTSIZ_PKTCNT(packets
);
614 epsize
|= DXEPTSIZ_XFERSIZE(length
);
616 dev_dbg(hsotg
->dev
, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
617 __func__
, packets
, length
, ureq
->length
, epsize
, epsize_reg
);
619 /* store the request as the current one we're doing */
622 /* write size / packets */
623 writel(epsize
, hsotg
->regs
+ epsize_reg
);
625 if (using_dma(hsotg
) && !continuing
) {
626 unsigned int dma_reg
;
629 * write DMA address to control register, buffer already
630 * synced by s3c_hsotg_ep_queue().
633 dma_reg
= dir_in
? DIEPDMA(index
) : DOEPDMA(index
);
634 writel(ureq
->dma
, hsotg
->regs
+ dma_reg
);
636 dev_dbg(hsotg
->dev
, "%s: %pad => 0x%08x\n",
637 __func__
, &ureq
->dma
, dma_reg
);
640 ctrl
|= DXEPCTL_EPENA
; /* ensure ep enabled */
641 ctrl
|= DXEPCTL_USBACTEP
;
643 dev_dbg(hsotg
->dev
, "ep0 state:%d\n", hsotg
->ep0_state
);
645 /* For Setup request do not clear NAK */
646 if (!(index
== 0 && hsotg
->ep0_state
== DWC2_EP0_SETUP
))
647 ctrl
|= DXEPCTL_CNAK
; /* clear NAK set by core */
649 dev_dbg(hsotg
->dev
, "%s: DxEPCTL=0x%08x\n", __func__
, ctrl
);
650 writel(ctrl
, hsotg
->regs
+ epctrl_reg
);
653 * set these, it seems that DMA support increments past the end
654 * of the packet buffer so we need to calculate the length from
657 hs_ep
->size_loaded
= length
;
658 hs_ep
->last_load
= ureq
->actual
;
660 if (dir_in
&& !using_dma(hsotg
)) {
661 /* set these anyway, we may need them for non-periodic in */
662 hs_ep
->fifo_load
= 0;
664 s3c_hsotg_write_fifo(hsotg
, hs_ep
, hs_req
);
668 * clear the INTknTXFEmpMsk when we start request, more as a aide
669 * to debugging to see what is going on.
672 writel(DIEPMSK_INTKNTXFEMPMSK
,
673 hsotg
->regs
+ DIEPINT(index
));
676 * Note, trying to clear the NAK here causes problems with transmit
677 * on the S3C6400 ending up with the TXFIFO becoming full.
680 /* check ep is enabled */
681 if (!(readl(hsotg
->regs
+ epctrl_reg
) & DXEPCTL_EPENA
))
683 "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
684 index
, readl(hsotg
->regs
+ epctrl_reg
));
686 dev_dbg(hsotg
->dev
, "%s: DXEPCTL=0x%08x\n",
687 __func__
, readl(hsotg
->regs
+ epctrl_reg
));
689 /* enable ep interrupts */
690 s3c_hsotg_ctrl_epint(hsotg
, hs_ep
->index
, hs_ep
->dir_in
, 1);
694 * s3c_hsotg_map_dma - map the DMA memory being used for the request
695 * @hsotg: The device state.
696 * @hs_ep: The endpoint the request is on.
697 * @req: The request being processed.
699 * We've been asked to queue a request, so ensure that the memory buffer
700 * is correctly setup for DMA. If we've been passed an extant DMA address
701 * then ensure the buffer has been synced to memory. If our buffer has no
702 * DMA memory, then we map the memory and mark our request to allow us to
703 * cleanup on completion.
705 static int s3c_hsotg_map_dma(struct dwc2_hsotg
*hsotg
,
706 struct s3c_hsotg_ep
*hs_ep
,
707 struct usb_request
*req
)
709 struct s3c_hsotg_req
*hs_req
= our_req(req
);
712 /* if the length is zero, ignore the DMA data */
713 if (hs_req
->req
.length
== 0)
716 ret
= usb_gadget_map_request(&hsotg
->gadget
, req
, hs_ep
->dir_in
);
723 dev_err(hsotg
->dev
, "%s: failed to map buffer %p, %d bytes\n",
724 __func__
, req
->buf
, req
->length
);
729 static int s3c_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg
*hsotg
,
730 struct s3c_hsotg_ep
*hs_ep
, struct s3c_hsotg_req
*hs_req
)
732 void *req_buf
= hs_req
->req
.buf
;
734 /* If dma is not being used or buffer is aligned */
735 if (!using_dma(hsotg
) || !((long)req_buf
& 3))
738 WARN_ON(hs_req
->saved_req_buf
);
740 dev_dbg(hsotg
->dev
, "%s: %s: buf=%p length=%d\n", __func__
,
741 hs_ep
->ep
.name
, req_buf
, hs_req
->req
.length
);
743 hs_req
->req
.buf
= kmalloc(hs_req
->req
.length
, GFP_ATOMIC
);
744 if (!hs_req
->req
.buf
) {
745 hs_req
->req
.buf
= req_buf
;
747 "%s: unable to allocate memory for bounce buffer\n",
752 /* Save actual buffer */
753 hs_req
->saved_req_buf
= req_buf
;
756 memcpy(hs_req
->req
.buf
, req_buf
, hs_req
->req
.length
);
760 static void s3c_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg
*hsotg
,
761 struct s3c_hsotg_ep
*hs_ep
, struct s3c_hsotg_req
*hs_req
)
763 /* If dma is not being used or buffer was aligned */
764 if (!using_dma(hsotg
) || !hs_req
->saved_req_buf
)
767 dev_dbg(hsotg
->dev
, "%s: %s: status=%d actual-length=%d\n", __func__
,
768 hs_ep
->ep
.name
, hs_req
->req
.status
, hs_req
->req
.actual
);
770 /* Copy data from bounce buffer on successful out transfer */
771 if (!hs_ep
->dir_in
&& !hs_req
->req
.status
)
772 memcpy(hs_req
->saved_req_buf
, hs_req
->req
.buf
,
775 /* Free bounce buffer */
776 kfree(hs_req
->req
.buf
);
778 hs_req
->req
.buf
= hs_req
->saved_req_buf
;
779 hs_req
->saved_req_buf
= NULL
;
782 static int s3c_hsotg_ep_queue(struct usb_ep
*ep
, struct usb_request
*req
,
785 struct s3c_hsotg_req
*hs_req
= our_req(req
);
786 struct s3c_hsotg_ep
*hs_ep
= our_ep(ep
);
787 struct dwc2_hsotg
*hs
= hs_ep
->parent
;
791 dev_dbg(hs
->dev
, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
792 ep
->name
, req
, req
->length
, req
->buf
, req
->no_interrupt
,
793 req
->zero
, req
->short_not_ok
);
795 /* initialise status of the request */
796 INIT_LIST_HEAD(&hs_req
->queue
);
798 req
->status
= -EINPROGRESS
;
800 ret
= s3c_hsotg_handle_unaligned_buf_start(hs
, hs_ep
, hs_req
);
804 /* if we're using DMA, sync the buffers as necessary */
806 ret
= s3c_hsotg_map_dma(hs
, hs_ep
, req
);
811 first
= list_empty(&hs_ep
->queue
);
812 list_add_tail(&hs_req
->queue
, &hs_ep
->queue
);
815 s3c_hsotg_start_req(hs
, hs_ep
, hs_req
, false);
820 static int s3c_hsotg_ep_queue_lock(struct usb_ep
*ep
, struct usb_request
*req
,
823 struct s3c_hsotg_ep
*hs_ep
= our_ep(ep
);
824 struct dwc2_hsotg
*hs
= hs_ep
->parent
;
825 unsigned long flags
= 0;
828 spin_lock_irqsave(&hs
->lock
, flags
);
829 ret
= s3c_hsotg_ep_queue(ep
, req
, gfp_flags
);
830 spin_unlock_irqrestore(&hs
->lock
, flags
);
835 static void s3c_hsotg_ep_free_request(struct usb_ep
*ep
,
836 struct usb_request
*req
)
838 struct s3c_hsotg_req
*hs_req
= our_req(req
);
844 * s3c_hsotg_complete_oursetup - setup completion callback
845 * @ep: The endpoint the request was on.
846 * @req: The request completed.
848 * Called on completion of any requests the driver itself
849 * submitted that need cleaning up.
851 static void s3c_hsotg_complete_oursetup(struct usb_ep
*ep
,
852 struct usb_request
*req
)
854 struct s3c_hsotg_ep
*hs_ep
= our_ep(ep
);
855 struct dwc2_hsotg
*hsotg
= hs_ep
->parent
;
857 dev_dbg(hsotg
->dev
, "%s: ep %p, req %p\n", __func__
, ep
, req
);
859 s3c_hsotg_ep_free_request(ep
, req
);
863 * ep_from_windex - convert control wIndex value to endpoint
864 * @hsotg: The driver state.
865 * @windex: The control request wIndex field (in host order).
867 * Convert the given wIndex into a pointer to an driver endpoint
868 * structure, or return NULL if it is not a valid endpoint.
870 static struct s3c_hsotg_ep
*ep_from_windex(struct dwc2_hsotg
*hsotg
,
873 struct s3c_hsotg_ep
*ep
;
874 int dir
= (windex
& USB_DIR_IN
) ? 1 : 0;
875 int idx
= windex
& 0x7F;
880 if (idx
> hsotg
->num_of_eps
)
883 ep
= index_to_ep(hsotg
, idx
, dir
);
885 if (idx
&& ep
->dir_in
!= dir
)
892 * s3c_hsotg_set_test_mode - Enable usb Test Modes
893 * @hsotg: The driver state.
894 * @testmode: requested usb test mode
895 * Enable usb Test Mode requested by the Host.
897 static int s3c_hsotg_set_test_mode(struct dwc2_hsotg
*hsotg
, int testmode
)
899 int dctl
= readl(hsotg
->regs
+ DCTL
);
901 dctl
&= ~DCTL_TSTCTL_MASK
;
908 dctl
|= testmode
<< DCTL_TSTCTL_SHIFT
;
913 writel(dctl
, hsotg
->regs
+ DCTL
);
918 * s3c_hsotg_send_reply - send reply to control request
919 * @hsotg: The device state
921 * @buff: Buffer for request
922 * @length: Length of reply.
924 * Create a request and queue it on the given endpoint. This is useful as
925 * an internal method of sending replies to certain control requests, etc.
927 static int s3c_hsotg_send_reply(struct dwc2_hsotg
*hsotg
,
928 struct s3c_hsotg_ep
*ep
,
932 struct usb_request
*req
;
935 dev_dbg(hsotg
->dev
, "%s: buff %p, len %d\n", __func__
, buff
, length
);
937 req
= s3c_hsotg_ep_alloc_request(&ep
->ep
, GFP_ATOMIC
);
938 hsotg
->ep0_reply
= req
;
940 dev_warn(hsotg
->dev
, "%s: cannot alloc req\n", __func__
);
944 req
->buf
= hsotg
->ep0_buff
;
945 req
->length
= length
;
947 * zero flag is for sending zlp in DATA IN stage. It has no impact on
951 req
->complete
= s3c_hsotg_complete_oursetup
;
954 memcpy(req
->buf
, buff
, length
);
956 ret
= s3c_hsotg_ep_queue(&ep
->ep
, req
, GFP_ATOMIC
);
958 dev_warn(hsotg
->dev
, "%s: cannot queue req\n", __func__
);
966 * s3c_hsotg_process_req_status - process request GET_STATUS
967 * @hsotg: The device state
968 * @ctrl: USB control request
970 static int s3c_hsotg_process_req_status(struct dwc2_hsotg
*hsotg
,
971 struct usb_ctrlrequest
*ctrl
)
973 struct s3c_hsotg_ep
*ep0
= hsotg
->eps_out
[0];
974 struct s3c_hsotg_ep
*ep
;
978 dev_dbg(hsotg
->dev
, "%s: USB_REQ_GET_STATUS\n", __func__
);
981 dev_warn(hsotg
->dev
, "%s: direction out?\n", __func__
);
985 switch (ctrl
->bRequestType
& USB_RECIP_MASK
) {
986 case USB_RECIP_DEVICE
:
987 reply
= cpu_to_le16(0); /* bit 0 => self powered,
988 * bit 1 => remote wakeup */
991 case USB_RECIP_INTERFACE
:
992 /* currently, the data result should be zero */
993 reply
= cpu_to_le16(0);
996 case USB_RECIP_ENDPOINT
:
997 ep
= ep_from_windex(hsotg
, le16_to_cpu(ctrl
->wIndex
));
1001 reply
= cpu_to_le16(ep
->halted
? 1 : 0);
1008 if (le16_to_cpu(ctrl
->wLength
) != 2)
1011 ret
= s3c_hsotg_send_reply(hsotg
, ep0
, &reply
, 2);
1013 dev_err(hsotg
->dev
, "%s: failed to send reply\n", __func__
);
1020 static int s3c_hsotg_ep_sethalt(struct usb_ep
*ep
, int value
);
1023 * get_ep_head - return the first request on the endpoint
1024 * @hs_ep: The controller endpoint to get
1026 * Get the first request on the endpoint.
1028 static struct s3c_hsotg_req
*get_ep_head(struct s3c_hsotg_ep
*hs_ep
)
1030 if (list_empty(&hs_ep
->queue
))
1033 return list_first_entry(&hs_ep
->queue
, struct s3c_hsotg_req
, queue
);
1037 * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
1038 * @hsotg: The device state
1039 * @ctrl: USB control request
1041 static int s3c_hsotg_process_req_feature(struct dwc2_hsotg
*hsotg
,
1042 struct usb_ctrlrequest
*ctrl
)
1044 struct s3c_hsotg_ep
*ep0
= hsotg
->eps_out
[0];
1045 struct s3c_hsotg_req
*hs_req
;
1047 bool set
= (ctrl
->bRequest
== USB_REQ_SET_FEATURE
);
1048 struct s3c_hsotg_ep
*ep
;
1055 dev_dbg(hsotg
->dev
, "%s: %s_FEATURE\n",
1056 __func__
, set
? "SET" : "CLEAR");
1058 wValue
= le16_to_cpu(ctrl
->wValue
);
1059 wIndex
= le16_to_cpu(ctrl
->wIndex
);
1060 recip
= ctrl
->bRequestType
& USB_RECIP_MASK
;
1063 case USB_RECIP_DEVICE
:
1065 case USB_DEVICE_TEST_MODE
:
1066 if ((wIndex
& 0xff) != 0)
1071 hsotg
->test_mode
= wIndex
>> 8;
1072 ret
= s3c_hsotg_send_reply(hsotg
, ep0
, NULL
, 0);
1075 "%s: failed to send reply\n", __func__
);
1084 case USB_RECIP_ENDPOINT
:
1085 ep
= ep_from_windex(hsotg
, wIndex
);
1087 dev_dbg(hsotg
->dev
, "%s: no endpoint for 0x%04x\n",
1093 case USB_ENDPOINT_HALT
:
1094 halted
= ep
->halted
;
1096 s3c_hsotg_ep_sethalt(&ep
->ep
, set
);
1098 ret
= s3c_hsotg_send_reply(hsotg
, ep0
, NULL
, 0);
1101 "%s: failed to send reply\n", __func__
);
1106 * we have to complete all requests for ep if it was
1107 * halted, and the halt was cleared by CLEAR_FEATURE
1110 if (!set
&& halted
) {
1112 * If we have request in progress,
1118 list_del_init(&hs_req
->queue
);
1119 if (hs_req
->req
.complete
) {
1120 spin_unlock(&hsotg
->lock
);
1121 usb_gadget_giveback_request(
1122 &ep
->ep
, &hs_req
->req
);
1123 spin_lock(&hsotg
->lock
);
1127 /* If we have pending request, then start it */
1129 restart
= !list_empty(&ep
->queue
);
1131 hs_req
= get_ep_head(ep
);
1132 s3c_hsotg_start_req(hsotg
, ep
,
1150 static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg
*hsotg
);
1153 * s3c_hsotg_stall_ep0 - stall ep0
1154 * @hsotg: The device state
1156 * Set stall for ep0 as response for setup request.
1158 static void s3c_hsotg_stall_ep0(struct dwc2_hsotg
*hsotg
)
1160 struct s3c_hsotg_ep
*ep0
= hsotg
->eps_out
[0];
1164 dev_dbg(hsotg
->dev
, "ep0 stall (dir=%d)\n", ep0
->dir_in
);
1165 reg
= (ep0
->dir_in
) ? DIEPCTL0
: DOEPCTL0
;
1168 * DxEPCTL_Stall will be cleared by EP once it has
1169 * taken effect, so no need to clear later.
1172 ctrl
= readl(hsotg
->regs
+ reg
);
1173 ctrl
|= DXEPCTL_STALL
;
1174 ctrl
|= DXEPCTL_CNAK
;
1175 writel(ctrl
, hsotg
->regs
+ reg
);
1178 "written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
1179 ctrl
, reg
, readl(hsotg
->regs
+ reg
));
1182 * complete won't be called, so we enqueue
1183 * setup request here
1185 s3c_hsotg_enqueue_setup(hsotg
);
1189 * s3c_hsotg_process_control - process a control request
1190 * @hsotg: The device state
1191 * @ctrl: The control request received
1193 * The controller has received the SETUP phase of a control request, and
1194 * needs to work out what to do next (and whether to pass it on to the
1197 static void s3c_hsotg_process_control(struct dwc2_hsotg
*hsotg
,
1198 struct usb_ctrlrequest
*ctrl
)
1200 struct s3c_hsotg_ep
*ep0
= hsotg
->eps_out
[0];
1204 dev_dbg(hsotg
->dev
, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
1205 ctrl
->bRequest
, ctrl
->bRequestType
,
1206 ctrl
->wValue
, ctrl
->wLength
);
1208 if (ctrl
->wLength
== 0) {
1210 hsotg
->ep0_state
= DWC2_EP0_STATUS_IN
;
1211 } else if (ctrl
->bRequestType
& USB_DIR_IN
) {
1213 hsotg
->ep0_state
= DWC2_EP0_DATA_IN
;
1216 hsotg
->ep0_state
= DWC2_EP0_DATA_OUT
;
1219 if ((ctrl
->bRequestType
& USB_TYPE_MASK
) == USB_TYPE_STANDARD
) {
1220 switch (ctrl
->bRequest
) {
1221 case USB_REQ_SET_ADDRESS
:
1222 hsotg
->connected
= 1;
1223 dcfg
= readl(hsotg
->regs
+ DCFG
);
1224 dcfg
&= ~DCFG_DEVADDR_MASK
;
1225 dcfg
|= (le16_to_cpu(ctrl
->wValue
) <<
1226 DCFG_DEVADDR_SHIFT
) & DCFG_DEVADDR_MASK
;
1227 writel(dcfg
, hsotg
->regs
+ DCFG
);
1229 dev_info(hsotg
->dev
, "new address %d\n", ctrl
->wValue
);
1231 ret
= s3c_hsotg_send_reply(hsotg
, ep0
, NULL
, 0);
1234 case USB_REQ_GET_STATUS
:
1235 ret
= s3c_hsotg_process_req_status(hsotg
, ctrl
);
1238 case USB_REQ_CLEAR_FEATURE
:
1239 case USB_REQ_SET_FEATURE
:
1240 ret
= s3c_hsotg_process_req_feature(hsotg
, ctrl
);
1245 /* as a fallback, try delivering it to the driver to deal with */
1247 if (ret
== 0 && hsotg
->driver
) {
1248 spin_unlock(&hsotg
->lock
);
1249 ret
= hsotg
->driver
->setup(&hsotg
->gadget
, ctrl
);
1250 spin_lock(&hsotg
->lock
);
1252 dev_dbg(hsotg
->dev
, "driver->setup() ret %d\n", ret
);
1256 * the request is either unhandlable, or is not formatted correctly
1257 * so respond with a STALL for the status stage to indicate failure.
1261 s3c_hsotg_stall_ep0(hsotg
);
1265 * s3c_hsotg_complete_setup - completion of a setup transfer
1266 * @ep: The endpoint the request was on.
1267 * @req: The request completed.
1269 * Called on completion of any requests the driver itself submitted for
1272 static void s3c_hsotg_complete_setup(struct usb_ep
*ep
,
1273 struct usb_request
*req
)
1275 struct s3c_hsotg_ep
*hs_ep
= our_ep(ep
);
1276 struct dwc2_hsotg
*hsotg
= hs_ep
->parent
;
1278 if (req
->status
< 0) {
1279 dev_dbg(hsotg
->dev
, "%s: failed %d\n", __func__
, req
->status
);
1283 spin_lock(&hsotg
->lock
);
1284 if (req
->actual
== 0)
1285 s3c_hsotg_enqueue_setup(hsotg
);
1287 s3c_hsotg_process_control(hsotg
, req
->buf
);
1288 spin_unlock(&hsotg
->lock
);
1292 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
1293 * @hsotg: The device state.
1295 * Enqueue a request on EP0 if necessary to received any SETUP packets
1296 * received from the host.
1298 static void s3c_hsotg_enqueue_setup(struct dwc2_hsotg
*hsotg
)
1300 struct usb_request
*req
= hsotg
->ctrl_req
;
1301 struct s3c_hsotg_req
*hs_req
= our_req(req
);
1304 dev_dbg(hsotg
->dev
, "%s: queueing setup request\n", __func__
);
1308 req
->buf
= hsotg
->ctrl_buff
;
1309 req
->complete
= s3c_hsotg_complete_setup
;
1311 if (!list_empty(&hs_req
->queue
)) {
1312 dev_dbg(hsotg
->dev
, "%s already queued???\n", __func__
);
1316 hsotg
->eps_out
[0]->dir_in
= 0;
1317 hsotg
->eps_out
[0]->send_zlp
= 0;
1318 hsotg
->ep0_state
= DWC2_EP0_SETUP
;
1320 ret
= s3c_hsotg_ep_queue(&hsotg
->eps_out
[0]->ep
, req
, GFP_ATOMIC
);
1322 dev_err(hsotg
->dev
, "%s: failed queue (%d)\n", __func__
, ret
);
1324 * Don't think there's much we can do other than watch the
1330 static void s3c_hsotg_program_zlp(struct dwc2_hsotg
*hsotg
,
1331 struct s3c_hsotg_ep
*hs_ep
)
1334 u8 index
= hs_ep
->index
;
1335 u32 epctl_reg
= hs_ep
->dir_in
? DIEPCTL(index
) : DOEPCTL(index
);
1336 u32 epsiz_reg
= hs_ep
->dir_in
? DIEPTSIZ(index
) : DOEPTSIZ(index
);
1339 dev_dbg(hsotg
->dev
, "Sending zero-length packet on ep%d\n",
1342 dev_dbg(hsotg
->dev
, "Receiving zero-length packet on ep%d\n",
1345 writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
1346 DXEPTSIZ_XFERSIZE(0), hsotg
->regs
+
1349 ctrl
= readl(hsotg
->regs
+ epctl_reg
);
1350 ctrl
|= DXEPCTL_CNAK
; /* clear NAK set by core */
1351 ctrl
|= DXEPCTL_EPENA
; /* ensure ep enabled */
1352 ctrl
|= DXEPCTL_USBACTEP
;
1353 writel(ctrl
, hsotg
->regs
+ epctl_reg
);
1357 * s3c_hsotg_complete_request - complete a request given to us
1358 * @hsotg: The device state.
1359 * @hs_ep: The endpoint the request was on.
1360 * @hs_req: The request to complete.
1361 * @result: The result code (0 => Ok, otherwise errno)
1363 * The given request has finished, so call the necessary completion
1364 * if it has one and then look to see if we can start a new request
1367 * Note, expects the ep to already be locked as appropriate.
1369 static void s3c_hsotg_complete_request(struct dwc2_hsotg
*hsotg
,
1370 struct s3c_hsotg_ep
*hs_ep
,
1371 struct s3c_hsotg_req
*hs_req
,
1377 dev_dbg(hsotg
->dev
, "%s: nothing to complete?\n", __func__
);
1381 dev_dbg(hsotg
->dev
, "complete: ep %p %s, req %p, %d => %p\n",
1382 hs_ep
, hs_ep
->ep
.name
, hs_req
, result
, hs_req
->req
.complete
);
1385 * only replace the status if we've not already set an error
1386 * from a previous transaction
1389 if (hs_req
->req
.status
== -EINPROGRESS
)
1390 hs_req
->req
.status
= result
;
1392 s3c_hsotg_handle_unaligned_buf_complete(hsotg
, hs_ep
, hs_req
);
1395 list_del_init(&hs_req
->queue
);
1397 if (using_dma(hsotg
))
1398 s3c_hsotg_unmap_dma(hsotg
, hs_ep
, hs_req
);
1401 * call the complete request with the locks off, just in case the
1402 * request tries to queue more work for this endpoint.
1405 if (hs_req
->req
.complete
) {
1406 spin_unlock(&hsotg
->lock
);
1407 usb_gadget_giveback_request(&hs_ep
->ep
, &hs_req
->req
);
1408 spin_lock(&hsotg
->lock
);
1412 * Look to see if there is anything else to do. Note, the completion
1413 * of the previous request may have caused a new request to be started
1414 * so be careful when doing this.
1417 if (!hs_ep
->req
&& result
>= 0) {
1418 restart
= !list_empty(&hs_ep
->queue
);
1420 hs_req
= get_ep_head(hs_ep
);
1421 s3c_hsotg_start_req(hsotg
, hs_ep
, hs_req
, false);
1427 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
1428 * @hsotg: The device state.
1429 * @ep_idx: The endpoint index for the data
1430 * @size: The size of data in the fifo, in bytes
1432 * The FIFO status shows there is data to read from the FIFO for a given
1433 * endpoint, so sort out whether we need to read the data into a request
1434 * that has been made for that endpoint.
1436 static void s3c_hsotg_rx_data(struct dwc2_hsotg
*hsotg
, int ep_idx
, int size
)
1438 struct s3c_hsotg_ep
*hs_ep
= hsotg
->eps_out
[ep_idx
];
1439 struct s3c_hsotg_req
*hs_req
= hs_ep
->req
;
1440 void __iomem
*fifo
= hsotg
->regs
+ EPFIFO(ep_idx
);
1447 u32 epctl
= readl(hsotg
->regs
+ DOEPCTL(ep_idx
));
1451 "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
1452 __func__
, size
, ep_idx
, epctl
);
1454 /* dump the data from the FIFO, we've nothing we can do */
1455 for (ptr
= 0; ptr
< size
; ptr
+= 4)
1462 read_ptr
= hs_req
->req
.actual
;
1463 max_req
= hs_req
->req
.length
- read_ptr
;
1465 dev_dbg(hsotg
->dev
, "%s: read %d/%d, done %d/%d\n",
1466 __func__
, to_read
, max_req
, read_ptr
, hs_req
->req
.length
);
1468 if (to_read
> max_req
) {
1470 * more data appeared than we where willing
1471 * to deal with in this request.
1474 /* currently we don't deal this */
1478 hs_ep
->total_data
+= to_read
;
1479 hs_req
->req
.actual
+= to_read
;
1480 to_read
= DIV_ROUND_UP(to_read
, 4);
1483 * note, we might over-write the buffer end by 3 bytes depending on
1484 * alignment of the data.
1486 ioread32_rep(fifo
, hs_req
->req
.buf
+ read_ptr
, to_read
);
1490 * s3c_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
1491 * @hsotg: The device instance
1492 * @dir_in: If IN zlp
1494 * Generate a zero-length IN packet request for terminating a SETUP
1497 * Note, since we don't write any data to the TxFIFO, then it is
1498 * currently believed that we do not need to wait for any space in
1501 static void s3c_hsotg_ep0_zlp(struct dwc2_hsotg
*hsotg
, bool dir_in
)
1503 /* eps_out[0] is used in both directions */
1504 hsotg
->eps_out
[0]->dir_in
= dir_in
;
1505 hsotg
->ep0_state
= dir_in
? DWC2_EP0_STATUS_IN
: DWC2_EP0_STATUS_OUT
;
1507 s3c_hsotg_program_zlp(hsotg
, hsotg
->eps_out
[0]);
1511 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
1512 * @hsotg: The device instance
1513 * @epnum: The endpoint received from
1515 * The RXFIFO has delivered an OutDone event, which means that the data
1516 * transfer for an OUT endpoint has been completed, either by a short
1517 * packet or by the finish of a transfer.
1519 static void s3c_hsotg_handle_outdone(struct dwc2_hsotg
*hsotg
, int epnum
)
1521 u32 epsize
= readl(hsotg
->regs
+ DOEPTSIZ(epnum
));
1522 struct s3c_hsotg_ep
*hs_ep
= hsotg
->eps_out
[epnum
];
1523 struct s3c_hsotg_req
*hs_req
= hs_ep
->req
;
1524 struct usb_request
*req
= &hs_req
->req
;
1525 unsigned size_left
= DXEPTSIZ_XFERSIZE_GET(epsize
);
1529 dev_dbg(hsotg
->dev
, "%s: no request active\n", __func__
);
1533 if (epnum
== 0 && hsotg
->ep0_state
== DWC2_EP0_STATUS_OUT
) {
1534 dev_dbg(hsotg
->dev
, "zlp packet received\n");
1535 s3c_hsotg_complete_request(hsotg
, hs_ep
, hs_req
, 0);
1536 s3c_hsotg_enqueue_setup(hsotg
);
1540 if (using_dma(hsotg
)) {
1544 * Calculate the size of the transfer by checking how much
1545 * is left in the endpoint size register and then working it
1546 * out from the amount we loaded for the transfer.
1548 * We need to do this as DMA pointers are always 32bit aligned
1549 * so may overshoot/undershoot the transfer.
1552 size_done
= hs_ep
->size_loaded
- size_left
;
1553 size_done
+= hs_ep
->last_load
;
1555 req
->actual
= size_done
;
1558 /* if there is more request to do, schedule new transfer */
1559 if (req
->actual
< req
->length
&& size_left
== 0) {
1560 s3c_hsotg_start_req(hsotg
, hs_ep
, hs_req
, true);
1564 if (req
->actual
< req
->length
&& req
->short_not_ok
) {
1565 dev_dbg(hsotg
->dev
, "%s: got %d/%d (short not ok) => error\n",
1566 __func__
, req
->actual
, req
->length
);
1569 * todo - what should we return here? there's no one else
1570 * even bothering to check the status.
1574 if (epnum
== 0 && hsotg
->ep0_state
== DWC2_EP0_DATA_OUT
) {
1575 /* Move to STATUS IN */
1576 s3c_hsotg_ep0_zlp(hsotg
, true);
1580 s3c_hsotg_complete_request(hsotg
, hs_ep
, hs_req
, result
);
1584 * s3c_hsotg_read_frameno - read current frame number
1585 * @hsotg: The device instance
1587 * Return the current frame number
1589 static u32
s3c_hsotg_read_frameno(struct dwc2_hsotg
*hsotg
)
1593 dsts
= readl(hsotg
->regs
+ DSTS
);
1594 dsts
&= DSTS_SOFFN_MASK
;
1595 dsts
>>= DSTS_SOFFN_SHIFT
;
1601 * s3c_hsotg_handle_rx - RX FIFO has data
1602 * @hsotg: The device instance
1604 * The IRQ handler has detected that the RX FIFO has some data in it
1605 * that requires processing, so find out what is in there and do the
1608 * The RXFIFO is a true FIFO, the packets coming out are still in packet
1609 * chunks, so if you have x packets received on an endpoint you'll get x
1610 * FIFO events delivered, each with a packet's worth of data in it.
1612 * When using DMA, we should not be processing events from the RXFIFO
1613 * as the actual data should be sent to the memory directly and we turn
1614 * on the completion interrupts to get notifications of transfer completion.
1616 static void s3c_hsotg_handle_rx(struct dwc2_hsotg
*hsotg
)
1618 u32 grxstsr
= readl(hsotg
->regs
+ GRXSTSP
);
1619 u32 epnum
, status
, size
;
1621 WARN_ON(using_dma(hsotg
));
1623 epnum
= grxstsr
& GRXSTS_EPNUM_MASK
;
1624 status
= grxstsr
& GRXSTS_PKTSTS_MASK
;
1626 size
= grxstsr
& GRXSTS_BYTECNT_MASK
;
1627 size
>>= GRXSTS_BYTECNT_SHIFT
;
1629 dev_dbg(hsotg
->dev
, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1630 __func__
, grxstsr
, size
, epnum
);
1632 switch ((status
& GRXSTS_PKTSTS_MASK
) >> GRXSTS_PKTSTS_SHIFT
) {
1633 case GRXSTS_PKTSTS_GLOBALOUTNAK
:
1634 dev_dbg(hsotg
->dev
, "GLOBALOUTNAK\n");
1637 case GRXSTS_PKTSTS_OUTDONE
:
1638 dev_dbg(hsotg
->dev
, "OutDone (Frame=0x%08x)\n",
1639 s3c_hsotg_read_frameno(hsotg
));
1641 if (!using_dma(hsotg
))
1642 s3c_hsotg_handle_outdone(hsotg
, epnum
);
1645 case GRXSTS_PKTSTS_SETUPDONE
:
1647 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1648 s3c_hsotg_read_frameno(hsotg
),
1649 readl(hsotg
->regs
+ DOEPCTL(0)));
1651 * Call s3c_hsotg_handle_outdone here if it was not called from
1652 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
1653 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
1655 if (hsotg
->ep0_state
== DWC2_EP0_SETUP
)
1656 s3c_hsotg_handle_outdone(hsotg
, epnum
);
1659 case GRXSTS_PKTSTS_OUTRX
:
1660 s3c_hsotg_rx_data(hsotg
, epnum
, size
);
1663 case GRXSTS_PKTSTS_SETUPRX
:
1665 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1666 s3c_hsotg_read_frameno(hsotg
),
1667 readl(hsotg
->regs
+ DOEPCTL(0)));
1669 WARN_ON(hsotg
->ep0_state
!= DWC2_EP0_SETUP
);
1671 s3c_hsotg_rx_data(hsotg
, epnum
, size
);
1675 dev_warn(hsotg
->dev
, "%s: unknown status %08x\n",
1678 s3c_hsotg_dump(hsotg
);
1684 * s3c_hsotg_ep0_mps - turn max packet size into register setting
1685 * @mps: The maximum packet size in bytes.
1687 static u32
s3c_hsotg_ep0_mps(unsigned int mps
)
1691 return D0EPCTL_MPS_64
;
1693 return D0EPCTL_MPS_32
;
1695 return D0EPCTL_MPS_16
;
1697 return D0EPCTL_MPS_8
;
1700 /* bad max packet size, warn and return invalid result */
1706 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
1707 * @hsotg: The driver state.
1708 * @ep: The index number of the endpoint
1709 * @mps: The maximum packet size in bytes
1711 * Configure the maximum packet size for the given endpoint, updating
1712 * the hardware control registers to reflect this.
1714 static void s3c_hsotg_set_ep_maxpacket(struct dwc2_hsotg
*hsotg
,
1715 unsigned int ep
, unsigned int mps
, unsigned int dir_in
)
1717 struct s3c_hsotg_ep
*hs_ep
;
1718 void __iomem
*regs
= hsotg
->regs
;
1723 hs_ep
= index_to_ep(hsotg
, ep
, dir_in
);
1728 /* EP0 is a special case */
1729 mpsval
= s3c_hsotg_ep0_mps(mps
);
1732 hs_ep
->ep
.maxpacket
= mps
;
1735 mpsval
= mps
& DXEPCTL_MPS_MASK
;
1738 mcval
= ((mps
>> 11) & 0x3) + 1;
1742 hs_ep
->ep
.maxpacket
= mpsval
;
1746 reg
= readl(regs
+ DIEPCTL(ep
));
1747 reg
&= ~DXEPCTL_MPS_MASK
;
1749 writel(reg
, regs
+ DIEPCTL(ep
));
1751 reg
= readl(regs
+ DOEPCTL(ep
));
1752 reg
&= ~DXEPCTL_MPS_MASK
;
1754 writel(reg
, regs
+ DOEPCTL(ep
));
1760 dev_err(hsotg
->dev
, "ep%d: bad mps of %d\n", ep
, mps
);
1764 * s3c_hsotg_txfifo_flush - flush Tx FIFO
1765 * @hsotg: The driver state
1766 * @idx: The index for the endpoint (0..15)
1768 static void s3c_hsotg_txfifo_flush(struct dwc2_hsotg
*hsotg
, unsigned int idx
)
1773 writel(GRSTCTL_TXFNUM(idx
) | GRSTCTL_TXFFLSH
,
1774 hsotg
->regs
+ GRSTCTL
);
1776 /* wait until the fifo is flushed */
1780 val
= readl(hsotg
->regs
+ GRSTCTL
);
1782 if ((val
& (GRSTCTL_TXFFLSH
)) == 0)
1785 if (--timeout
== 0) {
1787 "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
1797 * s3c_hsotg_trytx - check to see if anything needs transmitting
1798 * @hsotg: The driver state
1799 * @hs_ep: The driver endpoint to check.
1801 * Check to see if there is a request that has data to send, and if so
1802 * make an attempt to write data into the FIFO.
1804 static int s3c_hsotg_trytx(struct dwc2_hsotg
*hsotg
,
1805 struct s3c_hsotg_ep
*hs_ep
)
1807 struct s3c_hsotg_req
*hs_req
= hs_ep
->req
;
1809 if (!hs_ep
->dir_in
|| !hs_req
) {
1811 * if request is not enqueued, we disable interrupts
1812 * for endpoints, excepting ep0
1814 if (hs_ep
->index
!= 0)
1815 s3c_hsotg_ctrl_epint(hsotg
, hs_ep
->index
,
1820 if (hs_req
->req
.actual
< hs_req
->req
.length
) {
1821 dev_dbg(hsotg
->dev
, "trying to write more for ep%d\n",
1823 return s3c_hsotg_write_fifo(hsotg
, hs_ep
, hs_req
);
1830 * s3c_hsotg_complete_in - complete IN transfer
1831 * @hsotg: The device state.
1832 * @hs_ep: The endpoint that has just completed.
1834 * An IN transfer has been completed, update the transfer's state and then
1835 * call the relevant completion routines.
1837 static void s3c_hsotg_complete_in(struct dwc2_hsotg
*hsotg
,
1838 struct s3c_hsotg_ep
*hs_ep
)
1840 struct s3c_hsotg_req
*hs_req
= hs_ep
->req
;
1841 u32 epsize
= readl(hsotg
->regs
+ DIEPTSIZ(hs_ep
->index
));
1842 int size_left
, size_done
;
1845 dev_dbg(hsotg
->dev
, "XferCompl but no req\n");
1849 /* Finish ZLP handling for IN EP0 transactions */
1850 if (hs_ep
->index
== 0 && hsotg
->ep0_state
== DWC2_EP0_STATUS_IN
) {
1851 dev_dbg(hsotg
->dev
, "zlp packet sent\n");
1852 s3c_hsotg_complete_request(hsotg
, hs_ep
, hs_req
, 0);
1853 if (hsotg
->test_mode
) {
1856 ret
= s3c_hsotg_set_test_mode(hsotg
, hsotg
->test_mode
);
1858 dev_dbg(hsotg
->dev
, "Invalid Test #%d\n",
1860 s3c_hsotg_stall_ep0(hsotg
);
1864 s3c_hsotg_enqueue_setup(hsotg
);
1869 * Calculate the size of the transfer by checking how much is left
1870 * in the endpoint size register and then working it out from
1871 * the amount we loaded for the transfer.
1873 * We do this even for DMA, as the transfer may have incremented
1874 * past the end of the buffer (DMA transfers are always 32bit
1878 size_left
= DXEPTSIZ_XFERSIZE_GET(epsize
);
1880 size_done
= hs_ep
->size_loaded
- size_left
;
1881 size_done
+= hs_ep
->last_load
;
1883 if (hs_req
->req
.actual
!= size_done
)
1884 dev_dbg(hsotg
->dev
, "%s: adjusting size done %d => %d\n",
1885 __func__
, hs_req
->req
.actual
, size_done
);
1887 hs_req
->req
.actual
= size_done
;
1888 dev_dbg(hsotg
->dev
, "req->length:%d req->actual:%d req->zero:%d\n",
1889 hs_req
->req
.length
, hs_req
->req
.actual
, hs_req
->req
.zero
);
1891 if (!size_left
&& hs_req
->req
.actual
< hs_req
->req
.length
) {
1892 dev_dbg(hsotg
->dev
, "%s trying more for req...\n", __func__
);
1893 s3c_hsotg_start_req(hsotg
, hs_ep
, hs_req
, true);
1897 /* Zlp for all endpoints, for ep0 only in DATA IN stage */
1898 if (hs_ep
->send_zlp
) {
1899 s3c_hsotg_program_zlp(hsotg
, hs_ep
);
1900 hs_ep
->send_zlp
= 0;
1901 /* transfer will be completed on next complete interrupt */
1905 if (hs_ep
->index
== 0 && hsotg
->ep0_state
== DWC2_EP0_DATA_IN
) {
1906 /* Move to STATUS OUT */
1907 s3c_hsotg_ep0_zlp(hsotg
, false);
1911 s3c_hsotg_complete_request(hsotg
, hs_ep
, hs_req
, 0);
1915 * s3c_hsotg_epint - handle an in/out endpoint interrupt
1916 * @hsotg: The driver state
1917 * @idx: The index for the endpoint (0..15)
1918 * @dir_in: Set if this is an IN endpoint
1920 * Process and clear any interrupt pending for an individual endpoint
1922 static void s3c_hsotg_epint(struct dwc2_hsotg
*hsotg
, unsigned int idx
,
1925 struct s3c_hsotg_ep
*hs_ep
= index_to_ep(hsotg
, idx
, dir_in
);
1926 u32 epint_reg
= dir_in
? DIEPINT(idx
) : DOEPINT(idx
);
1927 u32 epctl_reg
= dir_in
? DIEPCTL(idx
) : DOEPCTL(idx
);
1928 u32 epsiz_reg
= dir_in
? DIEPTSIZ(idx
) : DOEPTSIZ(idx
);
1932 ints
= readl(hsotg
->regs
+ epint_reg
);
1933 ctrl
= readl(hsotg
->regs
+ epctl_reg
);
1935 /* Clear endpoint interrupts */
1936 writel(ints
, hsotg
->regs
+ epint_reg
);
1939 dev_err(hsotg
->dev
, "%s:Interrupt for unconfigured ep%d(%s)\n",
1940 __func__
, idx
, dir_in
? "in" : "out");
1944 dev_dbg(hsotg
->dev
, "%s: ep%d(%s) DxEPINT=0x%08x\n",
1945 __func__
, idx
, dir_in
? "in" : "out", ints
);
1947 /* Don't process XferCompl interrupt if it is a setup packet */
1948 if (idx
== 0 && (ints
& (DXEPINT_SETUP
| DXEPINT_SETUP_RCVD
)))
1949 ints
&= ~DXEPINT_XFERCOMPL
;
1951 if (ints
& DXEPINT_XFERCOMPL
) {
1952 if (hs_ep
->isochronous
&& hs_ep
->interval
== 1) {
1953 if (ctrl
& DXEPCTL_EOFRNUM
)
1954 ctrl
|= DXEPCTL_SETEVENFR
;
1956 ctrl
|= DXEPCTL_SETODDFR
;
1957 writel(ctrl
, hsotg
->regs
+ epctl_reg
);
1961 "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
1962 __func__
, readl(hsotg
->regs
+ epctl_reg
),
1963 readl(hsotg
->regs
+ epsiz_reg
));
1966 * we get OutDone from the FIFO, so we only need to look
1967 * at completing IN requests here
1970 s3c_hsotg_complete_in(hsotg
, hs_ep
);
1972 if (idx
== 0 && !hs_ep
->req
)
1973 s3c_hsotg_enqueue_setup(hsotg
);
1974 } else if (using_dma(hsotg
)) {
1976 * We're using DMA, we need to fire an OutDone here
1977 * as we ignore the RXFIFO.
1980 s3c_hsotg_handle_outdone(hsotg
, idx
);
1984 if (ints
& DXEPINT_EPDISBLD
) {
1985 dev_dbg(hsotg
->dev
, "%s: EPDisbld\n", __func__
);
1988 int epctl
= readl(hsotg
->regs
+ epctl_reg
);
1990 s3c_hsotg_txfifo_flush(hsotg
, hs_ep
->fifo_index
);
1992 if ((epctl
& DXEPCTL_STALL
) &&
1993 (epctl
& DXEPCTL_EPTYPE_BULK
)) {
1994 int dctl
= readl(hsotg
->regs
+ DCTL
);
1996 dctl
|= DCTL_CGNPINNAK
;
1997 writel(dctl
, hsotg
->regs
+ DCTL
);
2002 if (ints
& DXEPINT_AHBERR
)
2003 dev_dbg(hsotg
->dev
, "%s: AHBErr\n", __func__
);
2005 if (ints
& DXEPINT_SETUP
) { /* Setup or Timeout */
2006 dev_dbg(hsotg
->dev
, "%s: Setup/Timeout\n", __func__
);
2008 if (using_dma(hsotg
) && idx
== 0) {
2010 * this is the notification we've received a
2011 * setup packet. In non-DMA mode we'd get this
2012 * from the RXFIFO, instead we need to process
2019 s3c_hsotg_handle_outdone(hsotg
, 0);
2023 if (ints
& DXEPINT_BACK2BACKSETUP
)
2024 dev_dbg(hsotg
->dev
, "%s: B2BSetup/INEPNakEff\n", __func__
);
2026 if (dir_in
&& !hs_ep
->isochronous
) {
2027 /* not sure if this is important, but we'll clear it anyway */
2028 if (ints
& DIEPMSK_INTKNTXFEMPMSK
) {
2029 dev_dbg(hsotg
->dev
, "%s: ep%d: INTknTXFEmpMsk\n",
2033 /* this probably means something bad is happening */
2034 if (ints
& DIEPMSK_INTKNEPMISMSK
) {
2035 dev_warn(hsotg
->dev
, "%s: ep%d: INTknEP\n",
2039 /* FIFO has space or is empty (see GAHBCFG) */
2040 if (hsotg
->dedicated_fifos
&&
2041 ints
& DIEPMSK_TXFIFOEMPTY
) {
2042 dev_dbg(hsotg
->dev
, "%s: ep%d: TxFIFOEmpty\n",
2044 if (!using_dma(hsotg
))
2045 s3c_hsotg_trytx(hsotg
, hs_ep
);
2051 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
2052 * @hsotg: The device state.
2054 * Handle updating the device settings after the enumeration phase has
2057 static void s3c_hsotg_irq_enumdone(struct dwc2_hsotg
*hsotg
)
2059 u32 dsts
= readl(hsotg
->regs
+ DSTS
);
2060 int ep0_mps
= 0, ep_mps
= 8;
2063 * This should signal the finish of the enumeration phase
2064 * of the USB handshaking, so we should now know what rate
2068 dev_dbg(hsotg
->dev
, "EnumDone (DSTS=0x%08x)\n", dsts
);
2071 * note, since we're limited by the size of transfer on EP0, and
2072 * it seems IN transfers must be a even number of packets we do
2073 * not advertise a 64byte MPS on EP0.
2076 /* catch both EnumSpd_FS and EnumSpd_FS48 */
2077 switch (dsts
& DSTS_ENUMSPD_MASK
) {
2078 case DSTS_ENUMSPD_FS
:
2079 case DSTS_ENUMSPD_FS48
:
2080 hsotg
->gadget
.speed
= USB_SPEED_FULL
;
2081 ep0_mps
= EP0_MPS_LIMIT
;
2085 case DSTS_ENUMSPD_HS
:
2086 hsotg
->gadget
.speed
= USB_SPEED_HIGH
;
2087 ep0_mps
= EP0_MPS_LIMIT
;
2091 case DSTS_ENUMSPD_LS
:
2092 hsotg
->gadget
.speed
= USB_SPEED_LOW
;
2094 * note, we don't actually support LS in this driver at the
2095 * moment, and the documentation seems to imply that it isn't
2096 * supported by the PHYs on some of the devices.
2100 dev_info(hsotg
->dev
, "new device is %s\n",
2101 usb_speed_string(hsotg
->gadget
.speed
));
2104 * we should now know the maximum packet size for an
2105 * endpoint, so set the endpoints to a default value.
2110 /* Initialize ep0 for both in and out directions */
2111 s3c_hsotg_set_ep_maxpacket(hsotg
, 0, ep0_mps
, 1);
2112 s3c_hsotg_set_ep_maxpacket(hsotg
, 0, ep0_mps
, 0);
2113 for (i
= 1; i
< hsotg
->num_of_eps
; i
++) {
2114 if (hsotg
->eps_in
[i
])
2115 s3c_hsotg_set_ep_maxpacket(hsotg
, i
, ep_mps
, 1);
2116 if (hsotg
->eps_out
[i
])
2117 s3c_hsotg_set_ep_maxpacket(hsotg
, i
, ep_mps
, 0);
2121 /* ensure after enumeration our EP0 is active */
2123 s3c_hsotg_enqueue_setup(hsotg
);
2125 dev_dbg(hsotg
->dev
, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2126 readl(hsotg
->regs
+ DIEPCTL0
),
2127 readl(hsotg
->regs
+ DOEPCTL0
));
2131 * kill_all_requests - remove all requests from the endpoint's queue
2132 * @hsotg: The device state.
2133 * @ep: The endpoint the requests may be on.
2134 * @result: The result code to use.
2136 * Go through the requests on the given endpoint and mark them
2137 * completed with the given result code.
2139 static void kill_all_requests(struct dwc2_hsotg
*hsotg
,
2140 struct s3c_hsotg_ep
*ep
,
2143 struct s3c_hsotg_req
*req
, *treq
;
2148 list_for_each_entry_safe(req
, treq
, &ep
->queue
, queue
)
2149 s3c_hsotg_complete_request(hsotg
, ep
, req
,
2152 if (!hsotg
->dedicated_fifos
)
2154 size
= (readl(hsotg
->regs
+ DTXFSTS(ep
->index
)) & 0xffff) * 4;
2155 if (size
< ep
->fifo_size
)
2156 s3c_hsotg_txfifo_flush(hsotg
, ep
->fifo_index
);
2160 * s3c_hsotg_disconnect - disconnect service
2161 * @hsotg: The device state.
2163 * The device has been disconnected. Remove all current
2164 * transactions and signal the gadget driver that this
2167 void s3c_hsotg_disconnect(struct dwc2_hsotg
*hsotg
)
2171 if (!hsotg
->connected
)
2174 hsotg
->connected
= 0;
2175 hsotg
->test_mode
= 0;
2177 for (ep
= 0; ep
< hsotg
->num_of_eps
; ep
++) {
2178 if (hsotg
->eps_in
[ep
])
2179 kill_all_requests(hsotg
, hsotg
->eps_in
[ep
],
2181 if (hsotg
->eps_out
[ep
])
2182 kill_all_requests(hsotg
, hsotg
->eps_out
[ep
],
2186 call_gadget(hsotg
, disconnect
);
2188 EXPORT_SYMBOL_GPL(s3c_hsotg_disconnect
);
2191 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
2192 * @hsotg: The device state:
2193 * @periodic: True if this is a periodic FIFO interrupt
2195 static void s3c_hsotg_irq_fifoempty(struct dwc2_hsotg
*hsotg
, bool periodic
)
2197 struct s3c_hsotg_ep
*ep
;
2200 /* look through for any more data to transmit */
2201 for (epno
= 0; epno
< hsotg
->num_of_eps
; epno
++) {
2202 ep
= index_to_ep(hsotg
, epno
, 1);
2210 if ((periodic
&& !ep
->periodic
) ||
2211 (!periodic
&& ep
->periodic
))
2214 ret
= s3c_hsotg_trytx(hsotg
, ep
);
2220 /* IRQ flags which will trigger a retry around the IRQ loop */
2221 #define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
2226 * s3c_hsotg_corereset - issue softreset to the core
2227 * @hsotg: The device state
2229 * Issue a soft reset to the core, and await the core finishing it.
2231 static int s3c_hsotg_corereset(struct dwc2_hsotg
*hsotg
)
2236 dev_dbg(hsotg
->dev
, "resetting core\n");
2238 /* issue soft reset */
2239 writel(GRSTCTL_CSFTRST
, hsotg
->regs
+ GRSTCTL
);
2243 grstctl
= readl(hsotg
->regs
+ GRSTCTL
);
2244 } while ((grstctl
& GRSTCTL_CSFTRST
) && timeout
-- > 0);
2246 if (grstctl
& GRSTCTL_CSFTRST
) {
2247 dev_err(hsotg
->dev
, "Failed to get CSftRst asserted\n");
2254 u32 grstctl
= readl(hsotg
->regs
+ GRSTCTL
);
2256 if (timeout
-- < 0) {
2257 dev_info(hsotg
->dev
,
2258 "%s: reset failed, GRSTCTL=%08x\n",
2263 if (!(grstctl
& GRSTCTL_AHBIDLE
))
2266 break; /* reset done */
2269 dev_dbg(hsotg
->dev
, "reset successful\n");
/**
 * s3c_hsotg_core_init_disconnected - issue softreset to the core
 * @hsotg: The device state
 *
 * Issue a soft reset to the core, and await the core finishing it.
 */
void s3c_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
				      bool is_usb_reset)
{
	u32 val;

	if (!is_usb_reset)
		s3c_hsotg_corereset(hsotg);

	/*
	 * we must now enable ep0 ready for host detection and then
	 * set configuration.
	 */

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
	writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
	       (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);

	s3c_hsotg_init_fifo(hsotg);

	if (!is_usb_reset)
		__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);

	writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS, hsotg->regs + DCFG);

	/* Clear any pending OTG interrupts */
	writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	writel(0xffffffff, hsotg->regs + GINTSTS);

	writel(GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
	       GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
	       GINTSTS_CONIDSTSCHNG | GINTSTS_USBRST |
	       GINTSTS_ENUMDONE | GINTSTS_OTGINT |
	       GINTSTS_USBSUSP | GINTSTS_WKUPINT,
	       hsotg->regs + GINTMSK);

	if (using_dma(hsotg))
		writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
		       (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
		       hsotg->regs + GAHBCFG);
	else
		writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NP_TXF_EMP_LVL |
						    GAHBCFG_P_TXF_EMP_LVL) : 0) |
		       GAHBCFG_GLBL_INTR_EN,
		       hsotg->regs + GAHBCFG);

	/*
	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
	 * when we have no data to transfer. Otherwise we get being flooded by
	 * interrupts.
	 */
	writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
		DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
	       DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
	       DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
	       DIEPMSK_INTKNEPMISMSK,
	       hsotg->regs + DIEPMSK);

	/*
	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
	 * DMA mode we may need this.
	 */
	writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
				    DIEPMSK_TIMEOUTMSK) : 0) |
	       DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
	       DOEPMSK_SETUPMSK,
	       hsotg->regs + DOEPMSK);

	writel(0, hsotg->regs + DAINTMSK);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));

	/* enable in and out endpoint interrupts */
	s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);

	/*
	 * Enable the RXFIFO when in slave mode, as this is how we collect
	 * the data. In DMA mode, we get events from the FIFO but also
	 * things we cannot process, so do not use it.
	 */
	if (!using_dma(hsotg))
		s3c_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);

	/* Enable interrupts for EP0 in and out */
	s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
	s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);

	if (!is_usb_reset) {
		__orr32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
		udelay(10);  /* see openiboot */
		__bic32(hsotg->regs + DCTL, DCTL_PWRONPRGDONE);
	}

	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));

	/*
	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
	 * writing to the EPCTL register..
	 */

	/* set to read 1 8byte packet */
	writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
	       DXEPTSIZ_XFERSIZE(8), hsotg->regs + DOEPTSIZ0);

	writel(s3c_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
	       DXEPCTL_CNAK | DXEPCTL_EPENA |
	       DXEPCTL_USBACTEP,
	       hsotg->regs + DOEPCTL0);

	/* enable, but don't activate EP0in */
	writel(s3c_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
	       DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));

	/* clear global NAKs */
	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
	if (!is_usb_reset)
		val |= DCTL_SFTDISCON;
	__orr32(hsotg->regs + DCTL, val);

	/* must be at-least 3ms to allow bus to see disconnect */
	mdelay(3);

	hsotg->last_rst = jiffies;
}
static void s3c_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
{
	/* set the soft-disconnect bit */
	__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
}

void s3c_hsotg_core_connect(struct dwc2_hsotg *hsotg)
{
	/* remove the soft-disconnect and let's go */
	__bic32(hsotg->regs + DCTL, DCTL_SFTDISCON);
}
/**
 * s3c_hsotg_irq - handle device interrupt
 * @irq: The IRQ number triggered
 * @pw: The pw value when registered the handler.
 */
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
	struct dwc2_hsotg *hsotg = pw;
	int retry_count = 8;
	u32 gintsts;
	u32 gintmsk;

	spin_lock(&hsotg->lock);
irq_retry:
	gintsts = readl(hsotg->regs + GINTSTS);
	gintmsk = readl(hsotg->regs + GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	if (gintsts & GINTSTS_ENUMDONE) {
		writel(GINTSTS_ENUMDONE, hsotg->regs + GINTSTS);

		s3c_hsotg_irq_enumdone(hsotg);
	}

	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
		u32 daint = readl(hsotg->regs + DAINT);
		u32 daintmsk = readl(hsotg->regs + DAINTMSK);
		u32 daint_out, daint_in;
		int ep;

		daint &= daintmsk;
		daint_out = daint >> DAINT_OUTEP_SHIFT;
		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		for (ep = 0; ep < hsotg->num_of_eps && daint_out;
		     ep++, daint_out >>= 1) {
			if (daint_out & 1)
				s3c_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < hsotg->num_of_eps && daint_in;
		     ep++, daint_in >>= 1) {
			if (daint_in & 1)
				s3c_hsotg_epint(hsotg, ep, 1);
		}
	}

	if (gintsts & GINTSTS_USBRST) {
		u32 usb_status = readl(hsotg->regs + GOTGCTL);

		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			readl(hsotg->regs + GNPTXSTS));

		writel(GINTSTS_USBRST, hsotg->regs + GINTSTS);

		/* Report disconnection if it is not already done. */
		s3c_hsotg_disconnect(hsotg);

		if (usb_status & GOTGCTL_BSESVLD) {
			if (time_after(jiffies, hsotg->last_rst +
				       msecs_to_jiffies(200))) {

				kill_all_requests(hsotg, hsotg->eps_out[0],
						  -ECONNRESET);

				s3c_hsotg_core_init_disconnected(hsotg, true);
			}
		}
	}

	/* check both FIFOs */

	if (gintsts & GINTSTS_NPTXFEMP) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/*
		 * Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling
		 */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
		s3c_hsotg_irq_fifoempty(hsotg, false);
	}

	if (gintsts & GINTSTS_PTXFEMP) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in GINTSTS_NPTxFEmp */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
		s3c_hsotg_irq_fifoempty(hsotg, true);
	}

	if (gintsts & GINTSTS_RXFLVL) {
		/*
		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry s3c_hsotg_handle_rx if this is still
		 * set.
		 */

		s3c_hsotg_handle_rx(hsotg);
	}

	if (gintsts & GINTSTS_ERLYSUSP) {
		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
		writel(GINTSTS_ERLYSUSP, hsotg->regs + GINTSTS);
	}

	/*
	 * these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence.
	 */

	if (gintsts & GINTSTS_GOUTNAKEFF) {
		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

		writel(DCTL_CGOUTNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	if (gintsts & GINTSTS_GINNAKEFF) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		writel(DCTL_CGNPINNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	/*
	 * if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet.
	 */

	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	spin_unlock(&hsotg->lock);

	return IRQ_HANDLED;
}
/**
 * s3c_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpoint to configure
 * @desc: The USB endpoint descriptor to configure with.
 *
 * This is called from the USB gadget code's usb_ep_enable().
 */
static int s3c_hsotg_ep_enable(struct usb_ep *ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	unsigned int index = hs_ep->index;
	u32 epctrl_reg;
	u32 epctrl;
	u32 mps;
	unsigned int dir_in;
	unsigned int i, val, size;
	int ret = 0;

	dev_dbg(hsotg->dev,
		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
		desc->wMaxPacketSize, desc->bInterval);

	/* not to be called for EP0 */
	WARN_ON(index == 0);

	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	if (dir_in != hs_ep->dir_in) {
		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
		return -EINVAL;
	}

	mps = usb_endpoint_maxp(desc);

	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epctrl = readl(hsotg->regs + epctrl_reg);

	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
		__func__, epctrl, epctrl_reg);

	spin_lock_irqsave(&hsotg->lock, flags);

	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
	epctrl |= DXEPCTL_MPS(mps);

	/*
	 * mark the endpoint as active, otherwise the core may ignore
	 * transactions entirely for this endpoint
	 */
	epctrl |= DXEPCTL_USBACTEP;

	/*
	 * set the NAK status on the endpoint, otherwise we might try and
	 * do something with data that we've yet got a request to process
	 * since the RXFIFO will take data for an endpoint even if the
	 * size register hasn't been set.
	 */
	epctrl |= DXEPCTL_SNAK;

	/* update the endpoint state */
	s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);

	/* default, set to non-periodic */
	hs_ep->isochronous = 0;
	hs_ep->periodic = 0;
	hs_ep->halted = 0;
	hs_ep->interval = desc->bInterval;

	if (hs_ep->interval > 1 && hs_ep->mc > 1)
		dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_ISOC:
		epctrl |= DXEPCTL_EPTYPE_ISO;
		epctrl |= DXEPCTL_SETEVENFR;
		hs_ep->isochronous = 1;
		if (dir_in)
			hs_ep->periodic = 1;
		break;

	case USB_ENDPOINT_XFER_BULK:
		epctrl |= DXEPCTL_EPTYPE_BULK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dir_in)
			hs_ep->periodic = 1;

		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
		break;

	case USB_ENDPOINT_XFER_CONTROL:
		epctrl |= DXEPCTL_EPTYPE_CONTROL;
		break;
	}

	/* If fifo is already allocated for this ep */
	if (hs_ep->fifo_index) {
		size = hs_ep->ep.maxpacket * hs_ep->mc;
		/* If bigger fifo is required deallocate current one */
		if (size > hs_ep->fifo_size) {
			hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
			hs_ep->fifo_index = 0;
			hs_ep->fifo_size = 0;
		}
	}

	/*
	 * if the hardware has dedicated fifos, we must give each IN EP
	 * a unique tx-fifo even if it is non-periodic.
	 */
	if (dir_in && hsotg->dedicated_fifos && !hs_ep->fifo_index) {
		unsigned fifo_index = 0;
		u32 fifo_size = UINT_MAX;
		size = hs_ep->ep.maxpacket * hs_ep->mc;
		for (i = 1; i < hsotg->num_of_eps; ++i) {
			if (hsotg->fifo_map & (1 << i))
				continue;
			val = readl(hsotg->regs + DPTXFSIZN(i));
			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
			if (val < size)
				continue;
			/* Search for smallest acceptable fifo */
			if (val < fifo_size) {
				fifo_size = val;
				fifo_index = i;
			}
		}
		if (!fifo_index) {
			dev_err(hsotg->dev,
				"%s: No suitable fifo found\n", __func__);
			ret = -ENOMEM;
			goto error;
		}
		hsotg->fifo_map |= 1 << fifo_index;
		epctrl |= DXEPCTL_TXFNUM(fifo_index);
		hs_ep->fifo_index = fifo_index;
		hs_ep->fifo_size = fifo_size;
	}

	/* for non control endpoints, set PID to D0 */
	if (index)
		epctrl |= DXEPCTL_SETD0PID;

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	writel(epctrl, hsotg->regs + epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));

	/* enable the endpoint interrupt */
	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

error:
	spin_unlock_irqrestore(&hsotg->lock, flags);
	return ret;
}
/**
 * s3c_hsotg_ep_disable - disable given endpoint
 * @ep: The endpoint to disable.
 */
static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	unsigned long flags;
	u32 epctrl_reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	if (ep == &hsotg->eps_out[0]->ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);

	spin_lock_irqsave(&hsotg->lock, flags);

	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
	hs_ep->fifo_index = 0;
	hs_ep->fifo_size = 0;

	ctrl = readl(hsotg->regs + epctrl_reg);
	ctrl &= ~DXEPCTL_EPENA;
	ctrl &= ~DXEPCTL_USBACTEP;
	ctrl |= DXEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/* disable endpoint interrupts */
	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	/* terminate all requests with shutdown */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}

static int s3c_hsotg_ep_disable(struct usb_ep *ep)
{
	return s3c_hsotg_ep_disable_force(ep, false);
}
/**
 * on_list - check request is on the given endpoint
 * @ep: The endpoint to check.
 * @test: The request to test if it is on the endpoint.
 */
static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
{
	struct s3c_hsotg_req *req, *treq;

	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		if (req == test)
			return true;
	}

	return false;
}
/**
 * s3c_hsotg_ep_dequeue - dequeue given endpoint
 * @ep: The endpoint to dequeue.
 * @req: The request to be removed from a queue.
 */
static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct s3c_hsotg_req *hs_req = our_req(req);
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags;

	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

	spin_lock_irqsave(&hs->lock, flags);

	if (!on_list(hs_ep, hs_req)) {
		spin_unlock_irqrestore(&hs->lock, flags);
		return -EINVAL;
	}

	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	spin_unlock_irqrestore(&hs->lock, flags);

	return 0;
}
/**
 * s3c_hsotg_ep_sethalt - set halt on a given endpoint
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 */
static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	int index = hs_ep->index;
	u32 epreg;
	u32 epctl;
	u32 xfertype;

	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);

	if (index == 0) {
		if (value)
			s3c_hsotg_stall_ep0(hs);
		else
			dev_warn(hs->dev,
				 "%s: can't clear halt on ep0\n", __func__);
		return 0;
	}

	if (hs_ep->dir_in) {
		epreg = DIEPCTL(index);
		epctl = readl(hs->regs + epreg);

		if (value) {
			epctl |= DXEPCTL_STALL + DXEPCTL_SNAK;
			if (epctl & DXEPCTL_EPENA)
				epctl |= DXEPCTL_EPDIS;
		} else {
			epctl &= ~DXEPCTL_STALL;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
		}
		writel(epctl, hs->regs + epreg);
	} else {

		epreg = DOEPCTL(index);
		epctl = readl(hs->regs + epreg);

		if (value)
			epctl |= DXEPCTL_STALL;
		else {
			epctl &= ~DXEPCTL_STALL;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
		}
		writel(epctl, hs->regs + epreg);
	}

	hs_ep->halted = value;

	return 0;
}
/**
 * s3c_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 */
static int s3c_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&hs->lock, flags);
	ret = s3c_hsotg_ep_sethalt(ep, value);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}
static struct usb_ep_ops s3c_hsotg_ep_ops = {
	.enable		= s3c_hsotg_ep_enable,
	.disable	= s3c_hsotg_ep_disable,
	.alloc_request	= s3c_hsotg_ep_alloc_request,
	.free_request	= s3c_hsotg_ep_free_request,
	.queue		= s3c_hsotg_ep_queue_lock,
	.dequeue	= s3c_hsotg_ep_dequeue,
	.set_halt	= s3c_hsotg_ep_sethalt_lock,
	/* note, don't believe we have any call for the fifo routines */
};
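
/*
 * Illustrative only (not part of this driver): a gadget function driver
 * reaches the operations above through the standard gadget API rather than
 * calling them directly, roughly along the lines of:
 *
 *	struct usb_ep *ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	usb_ep_enable(ep);			// -> s3c_hsotg_ep_enable()
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	// -> s3c_hsotg_ep_queue_lock()
 *	usb_ep_set_halt(ep);			// -> s3c_hsotg_ep_sethalt_lock()
 *
 * The descriptor name above is hypothetical; the usb_ep_* helpers are the
 * generic gadget-core entry points that dispatch into this ops table.
 */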
/**
 * s3c_hsotg_phy_enable - enable platform phy dev
 * @hsotg: The driver state
 *
 * A wrapper for platform code responsible for controlling
 * low-level USB code
 */
static void s3c_hsotg_phy_enable(struct dwc2_hsotg *hsotg)
{
	struct platform_device *pdev = to_platform_device(hsotg->dev);

	dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);

	if (hsotg->uphy)
		usb_phy_init(hsotg->uphy);
	else if (hsotg->plat && hsotg->plat->phy_init)
		hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
	else {
		phy_init(hsotg->phy);
		phy_power_on(hsotg->phy);
	}
}
/**
 * s3c_hsotg_phy_disable - disable platform phy dev
 * @hsotg: The driver state
 *
 * A wrapper for platform code responsible for controlling
 * low-level USB code
 */
static void s3c_hsotg_phy_disable(struct dwc2_hsotg *hsotg)
{
	struct platform_device *pdev = to_platform_device(hsotg->dev);

	if (hsotg->uphy)
		usb_phy_shutdown(hsotg->uphy);
	else if (hsotg->plat && hsotg->plat->phy_exit)
		hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
	else {
		phy_power_off(hsotg->phy);
		phy_exit(hsotg->phy);
	}
}
/**
 * s3c_hsotg_init - initialize the usb core
 * @hsotg: The driver state
 */
static void s3c_hsotg_init(struct dwc2_hsotg *hsotg)
{
	u32 trdtim;

	/* unmask subset of endpoint interrupts */

	writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
	       DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
	       hsotg->regs + DIEPMSK);

	writel(DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
	       DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
	       hsotg->regs + DOEPMSK);

	writel(0, hsotg->regs + DAINTMSK);

	/* Be in disconnected state until gadget is registered */
	__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);

	/* setup fifos */

	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		readl(hsotg->regs + GRXFSIZ),
		readl(hsotg->regs + GNPTXFSIZ));

	s3c_hsotg_init_fifo(hsotg);

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
	writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
	       (trdtim << GUSBCFG_USBTRDTIM_SHIFT),
	       hsotg->regs + GUSBCFG);

	if (using_dma(hsotg))
		__orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
}
/**
 * s3c_hsotg_udc_start - prepare the udc for work
 * @gadget: The usb gadget state
 * @driver: The usb gadget driver
 *
 * Perform initialization to prepare udc device and driver
 * to work.
 */
static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
			       struct usb_gadget_driver *driver)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;
	int ret;

	if (!hsotg) {
		pr_err("%s: called with no device\n", __func__);
		return -ENODEV;
	}

	if (!driver) {
		dev_err(hsotg->dev, "%s: no driver\n", __func__);
		return -EINVAL;
	}

	if (driver->max_speed < USB_SPEED_FULL)
		dev_err(hsotg->dev, "%s: bad speed\n", __func__);

	if (!driver->setup) {
		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hsotg->init_mutex);
	WARN_ON(hsotg->driver);

	driver->driver.bus = NULL;
	hsotg->driver = driver;
	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

	clk_enable(hsotg->clk);

	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
				    hsotg->supplies);
	if (ret) {
		dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
		goto err;
	}

	s3c_hsotg_phy_enable(hsotg);
	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);

	spin_lock_irqsave(&hsotg->lock, flags);
	s3c_hsotg_init(hsotg);
	s3c_hsotg_core_init_disconnected(hsotg, false);
	hsotg->enabled = 0;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);

	mutex_unlock(&hsotg->init_mutex);

	return 0;

err:
	mutex_unlock(&hsotg->init_mutex);
	hsotg->driver = NULL;
	return ret;
}
/**
 * s3c_hsotg_udc_stop - stop the udc
 * @gadget: The usb gadget state
 * @driver: The usb gadget driver
 *
 * Stop udc hw block and stay tuned for future transmissions
 */
static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;
	int ep;

	if (!hsotg)
		return -ENODEV;

	mutex_lock(&hsotg->init_mutex);

	/* all endpoints should be shutdown */
	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
		if (hsotg->eps_in[ep])
			s3c_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
		if (hsotg->eps_out[ep])
			s3c_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	hsotg->driver = NULL;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	hsotg->enabled = 0;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, NULL);
	s3c_hsotg_phy_disable(hsotg);

	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);

	clk_disable(hsotg->clk);

	mutex_unlock(&hsotg->init_mutex);

	return 0;
}
/**
 * s3c_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number
 */
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return s3c_hsotg_read_frameno(to_hsotg(gadget));
}
/**
 * s3c_hsotg_pullup - connect/disconnect the USB PHY
 * @gadget: The usb gadget state
 * @is_on: Current state of the USB PHY
 *
 * Connect/Disconnect the USB PHY pullup
 */
static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags = 0;

	dev_dbg(hsotg->dev, "%s: is_on: %d\n", __func__, is_on);

	mutex_lock(&hsotg->init_mutex);
	spin_lock_irqsave(&hsotg->lock, flags);
	if (is_on) {
		clk_enable(hsotg->clk);
		hsotg->enabled = 1;
		s3c_hsotg_core_init_disconnected(hsotg, false);
		s3c_hsotg_core_connect(hsotg);
	} else {
		s3c_hsotg_core_disconnect(hsotg);
		s3c_hsotg_disconnect(hsotg);
		hsotg->enabled = 0;
		clk_disable(hsotg->clk);
	}

	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock_irqrestore(&hsotg->lock, flags);
	mutex_unlock(&hsotg->init_mutex);

	return 0;
}
static int s3c_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;

	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
	spin_lock_irqsave(&hsotg->lock, flags);

	if (is_active) {
		/* Kill any ep0 requests as controller will be reinitialized */
		kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
		s3c_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled)
			s3c_hsotg_core_connect(hsotg);
	} else {
		s3c_hsotg_core_disconnect(hsotg);
		s3c_hsotg_disconnect(hsotg);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}
/**
 * s3c_hsotg_vbus_draw - report bMaxPower field
 * @gadget: The usb gadget state
 * @mA: Amount of current
 *
 * Report how much power the device may consume to the phy.
 */
static int s3c_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);

	if (IS_ERR_OR_NULL(hsotg->uphy))
		return -ENOTSUPP;
	return usb_phy_set_power(hsotg->uphy, mA);
}
static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
	.get_frame	= s3c_hsotg_gadget_getframe,
	.udc_start	= s3c_hsotg_udc_start,
	.udc_stop	= s3c_hsotg_udc_stop,
	.pullup		= s3c_hsotg_pullup,
	.vbus_session	= s3c_hsotg_vbus_session,
	.vbus_draw	= s3c_hsotg_vbus_draw,
};
/**
 * s3c_hsotg_initep - initialise a single endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 *
 * Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Setup the endpoint name, any
 * direction information and other state that may be required.
 */
static void s3c_hsotg_initep(struct dwc2_hsotg *hsotg,
			     struct s3c_hsotg_ep *hs_ep,
			     int epnum,
			     bool dir_in)
{
	char *dir;

	if (epnum == 0)
		dir = "";
	else if (dir_in)
		dir = "in";
	else
		dir = "out";

	hs_ep->dir_in = dir_in;
	hs_ep->index = epnum;

	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);

	INIT_LIST_HEAD(&hs_ep->queue);
	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

	/* add to the list of endpoints known by the gadget driver */
	if (epnum)
		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

	hs_ep->parent = hsotg;
	hs_ep->ep.name = hs_ep->name;
	usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
	hs_ep->ep.ops = &s3c_hsotg_ep_ops;

	/*
	 * if we're using dma, we need to set the next-endpoint pointer
	 * to be something valid.
	 */

	if (using_dma(hsotg)) {
		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
		if (dir_in)
			writel(next, hsotg->regs + DIEPCTL(epnum));
		else
			writel(next, hsotg->regs + DOEPCTL(epnum));
	}
}
/**
 * s3c_hsotg_hw_cfg - read HW configuration registers
 * @hsotg: The device state
 *
 * Read the USB core HW configuration registers
 */
static int s3c_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
{
	u32 cfg;
	u32 ep_type;
	u32 i;

	/* check hardware configuration */

	cfg = readl(hsotg->regs + GHWCFG2);
	hsotg->num_of_eps = (cfg >> GHWCFG2_NUM_DEV_EP_SHIFT) & 0xF;
	/* Add ep0 */
	hsotg->num_of_eps++;

	hsotg->eps_in[0] = devm_kzalloc(hsotg->dev, sizeof(struct s3c_hsotg_ep),
					GFP_KERNEL);
	if (!hsotg->eps_in[0])
		return -ENOMEM;
	/* Same s3c_hsotg_ep is used in both directions for ep0 */
	hsotg->eps_out[0] = hsotg->eps_in[0];

	cfg = readl(hsotg->regs + GHWCFG1);
	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
		ep_type = cfg & 3;
		/* Direction in or both */
		if (!(ep_type & 2)) {
			hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
				sizeof(struct s3c_hsotg_ep), GFP_KERNEL);
			if (!hsotg->eps_in[i])
				return -ENOMEM;
		}
		/* Direction out or both */
		if (!(ep_type & 1)) {
			hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
				sizeof(struct s3c_hsotg_ep), GFP_KERNEL);
			if (!hsotg->eps_out[i])
				return -ENOMEM;
		}
	}

	cfg = readl(hsotg->regs + GHWCFG3);
	hsotg->fifo_mem = (cfg >> GHWCFG3_DFIFO_DEPTH_SHIFT);

	cfg = readl(hsotg->regs + GHWCFG4);
	hsotg->dedicated_fifos = (cfg >> GHWCFG4_DED_FIFO_SHIFT) & 1;

	dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
		 hsotg->num_of_eps,
		 hsotg->dedicated_fifos ? "dedicated" : "shared",
		 hsotg->fifo_mem);
	return 0;
}
/**
 * s3c_hsotg_dump - dump state of the udc
 * @hsotg: The device state
 */
static void s3c_hsotg_dump(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	struct device *dev = hsotg->dev;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
		 readl(regs + DCFG), readl(regs + DCTL),
		 readl(regs + DIEPMSK));

	dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
		 readl(regs + GAHBCFG), readl(regs + GHWCFG1));

	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));

	/* show periodic fifo settings */

	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
		val = readl(regs + DPTXFSIZN(idx));
		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
			 val >> FIFOSIZE_DEPTH_SHIFT,
			 val & FIFOSIZE_STARTADDR_MASK);
	}

	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
		dev_info(dev,
			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
			 readl(regs + DIEPCTL(idx)),
			 readl(regs + DIEPTSIZ(idx)),
			 readl(regs + DIEPDMA(idx)));

		val = readl(regs + DOEPCTL(idx));
		dev_info(dev,
			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
			 idx, readl(regs + DOEPCTL(idx)),
			 readl(regs + DOEPTSIZ(idx)),
			 readl(regs + DOEPDMA(idx)));
	}

	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
		 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
#endif
}
/**
 * testmode_write - debugfs: change usb test mode
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry modifies the current usb test mode.
 */
static ssize_t testmode_write(struct file *file, const char __user *ubuf,
			      size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct dwc2_hsotg *hsotg = s->private;
	unsigned long flags;
	u32 testmode = 0;
	char buf[32];

	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
		return -EFAULT;

	if (!strncmp(buf, "test_j", 6))
		testmode = TEST_J;
	else if (!strncmp(buf, "test_k", 6))
		testmode = TEST_K;
	else if (!strncmp(buf, "test_se0_nak", 12))
		testmode = TEST_SE0_NAK;
	else if (!strncmp(buf, "test_packet", 11))
		testmode = TEST_PACKET;
	else if (!strncmp(buf, "test_force_enable", 17))
		testmode = TEST_FORCE_EN;
	else
		testmode = 0;

	spin_lock_irqsave(&hsotg->lock, flags);
	s3c_hsotg_set_test_mode(hsotg, testmode);
	spin_unlock_irqrestore(&hsotg->lock, flags);
	return count;
}
/**
 * testmode_show - debugfs: show usb test mode state
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows which usb test mode is currently enabled.
 */
static int testmode_show(struct seq_file *s, void *unused)
{
	struct dwc2_hsotg *hsotg = s->private;
	unsigned long flags;
	int dctl;

	spin_lock_irqsave(&hsotg->lock, flags);
	dctl = readl(hsotg->regs + DCTL);
	dctl &= DCTL_TSTCTL_MASK;
	dctl >>= DCTL_TSTCTL_SHIFT;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	switch (dctl) {
	case 0:
		seq_puts(s, "no test\n");
		break;
	case TEST_J:
		seq_puts(s, "test_j\n");
		break;
	case TEST_K:
		seq_puts(s, "test_k\n");
		break;
	case TEST_SE0_NAK:
		seq_puts(s, "test_se0_nak\n");
		break;
	case TEST_PACKET:
		seq_puts(s, "test_packet\n");
		break;
	case TEST_FORCE_EN:
		seq_puts(s, "test_force_enable\n");
		break;
	default:
		seq_printf(s, "UNKNOWN %d\n", dctl);
	}

	return 0;
}
static int testmode_open(struct inode *inode, struct file *file)
{
	return single_open(file, testmode_show, inode->i_private);
}

static const struct file_operations testmode_fops = {
	.owner		= THIS_MODULE,
	.open		= testmode_open,
	.write		= testmode_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
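
/*
 * Usage sketch for the file above (assuming debugfs is mounted at
 * /sys/kernel/debug; the directory name comes from dev_name() of this
 * controller, shown here as a placeholder):
 *
 *	echo test_packet > /sys/kernel/debug/<device>/testmode
 *	cat /sys/kernel/debug/<device>/testmode
 *
 * Accepted values are test_j, test_k, test_se0_nak, test_packet and
 * test_force_enable, matching the strings parsed in testmode_write().
 */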
/**
 * state_show - debugfs: show overall driver and device state.
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the overall state of the hardware and
 * some general information about each of the endpoints available
 * to the system.
 */
static int state_show(struct seq_file *seq, void *v)
{
	struct dwc2_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	int idx;

	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
		   readl(regs + DCFG),
		   readl(regs + DCTL),
		   readl(regs + DSTS));

	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
		   readl(regs + DIEPMSK), readl(regs + DOEPMSK));

	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
		   readl(regs + GINTMSK),
		   readl(regs + GINTSTS));

	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
		   readl(regs + DAINTMSK),
		   readl(regs + DAINT));

	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
		   readl(regs + GNPTXSTS),
		   readl(regs + GRXSTSR));

	seq_puts(seq, "\nEndpoint status:\n");

	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
		u32 in, out;

		in = readl(regs + DIEPCTL(idx));
		out = readl(regs + DOEPCTL(idx));

		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
			   idx, in, out);

		in = readl(regs + DIEPTSIZ(idx));
		out = readl(regs + DOEPTSIZ(idx));

		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
			   in, out);

		seq_puts(seq, "\n");
	}

	return 0;
}

static int state_open(struct inode *inode, struct file *file)
{
	return single_open(file, state_show, inode->i_private);
}

static const struct file_operations state_fops = {
	.owner		= THIS_MODULE,
	.open		= state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/**
 * fifo_show - debugfs: show the fifo information
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * Show the FIFO information for the overall fifo and all the
 * periodic transmission FIFOs.
 */
static int fifo_show(struct seq_file *seq, void *v)
{
	struct dwc2_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	seq_puts(seq, "Non-periodic FIFOs:\n");
	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));

	val = readl(regs + GNPTXFSIZ);
	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
		   val >> FIFOSIZE_DEPTH_SHIFT,
		   val & FIFOSIZE_DEPTH_MASK);

	seq_puts(seq, "\nPeriodic TXFIFOs:\n");

	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
		val = readl(regs + DPTXFSIZN(idx));

		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
			   val >> FIFOSIZE_DEPTH_SHIFT,
			   val & FIFOSIZE_STARTADDR_MASK);
	}

	return 0;
}

static int fifo_open(struct inode *inode, struct file *file)
{
	return single_open(file, fifo_show, inode->i_private);
}

static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.open		= fifo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static const char *decode_direction(int is_in)
{
	return is_in ? "in" : "out";
}
/**
 * ep_show - debugfs: show the state of an endpoint.
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the state of the given endpoint (one is
 * registered for each available).
 */
static int ep_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg_ep *ep = seq->private;
	struct dwc2_hsotg *hsotg = ep->parent;
	struct s3c_hsotg_req *req;
	void __iomem *regs = hsotg->regs;
	int index = ep->index;
	int show_limit = 15;
	unsigned long flags;

	seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
		   ep->index, ep->ep.name, decode_direction(ep->dir_in));

	/* first show the register state */

	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
		   readl(regs + DIEPCTL(index)),
		   readl(regs + DOEPCTL(index)));

	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
		   readl(regs + DIEPDMA(index)),
		   readl(regs + DOEPDMA(index)));

	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
		   readl(regs + DIEPINT(index)),
		   readl(regs + DOEPINT(index)));

	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
		   readl(regs + DIEPTSIZ(index)),
		   readl(regs + DOEPTSIZ(index)));

	seq_puts(seq, "\n");
	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
	seq_printf(seq, "total_data=%ld\n", ep->total_data);

	seq_printf(seq, "request list (%p,%p):\n",
		   ep->queue.next, ep->queue.prev);

	spin_lock_irqsave(&hsotg->lock, flags);

	list_for_each_entry(req, &ep->queue, queue) {
		if (--show_limit < 0) {
			seq_puts(seq, "not showing more requests...\n");
			break;
		}

		seq_printf(seq, "%c req %p: %d bytes @%p, ",
			   req == ep->req ? '*' : ' ',
			   req, req->req.length, req->req.buf);
		seq_printf(seq, "%d done, res %d\n",
			   req->req.actual, req->req.status);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	return single_open(file, ep_show, inode->i_private);
}

static const struct file_operations ep_fops = {
	.owner		= THIS_MODULE,
	.open		= ep_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/**
 * s3c_hsotg_create_debug - create debugfs directory and files
 * @hsotg: The driver state
 *
 * Create the debugfs files to allow the user to get information
 * about the state of the system. The directory name is created
 * with the same name as the device itself, in case we end up
 * with multiple blocks in future systems.
 */
static void s3c_hsotg_create_debug(struct dwc2_hsotg *hsotg)
{
	struct dentry *root;
	unsigned epidx;

	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
	hsotg->debug_root = root;
	if (IS_ERR(root)) {
		dev_err(hsotg->dev, "cannot create debug root\n");
		return;
	}

	/* create general state file */

	hsotg->debug_file = debugfs_create_file("state", S_IRUGO, root,
						hsotg, &state_fops);

	if (IS_ERR(hsotg->debug_file))
		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);

	hsotg->debug_testmode = debugfs_create_file("testmode",
						    S_IRUGO | S_IWUSR, root,
						    hsotg, &testmode_fops);

	if (IS_ERR(hsotg->debug_testmode))
		dev_err(hsotg->dev, "%s: failed to create testmode\n",
			__func__);

	hsotg->debug_fifo = debugfs_create_file("fifo", S_IRUGO, root,
						hsotg, &fifo_fops);

	if (IS_ERR(hsotg->debug_fifo))
		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);

	/* Create one file for each out endpoint */
	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
		struct s3c_hsotg_ep *ep;

		ep = hsotg->eps_out[epidx];
		if (ep) {
			ep->debugfs = debugfs_create_file(ep->name, S_IRUGO,
							  root, ep, &ep_fops);

			if (IS_ERR(ep->debugfs))
				dev_err(hsotg->dev, "failed to create %s debug file\n",
					ep->name);
		}
	}
	/* Create one file for each in endpoint. EP0 is handled with out eps */
	for (epidx = 1; epidx < hsotg->num_of_eps; epidx++) {
		struct s3c_hsotg_ep *ep;

		ep = hsotg->eps_in[epidx];
		if (ep) {
			ep->debugfs = debugfs_create_file(ep->name, S_IRUGO,
							  root, ep, &ep_fops);

			if (IS_ERR(ep->debugfs))
				dev_err(hsotg->dev, "failed to create %s debug file\n",
					ep->name);
		}
	}
}
/**
 * s3c_hsotg_delete_debug - cleanup debugfs entries
 * @hsotg: The driver state
 *
 * Cleanup (remove) the debugfs files for use on module exit.
 */
static void s3c_hsotg_delete_debug(struct dwc2_hsotg *hsotg)
{
	unsigned epidx;

	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
		if (hsotg->eps_in[epidx])
			debugfs_remove(hsotg->eps_in[epidx]->debugfs);
		if (hsotg->eps_out[epidx])
			debugfs_remove(hsotg->eps_out[epidx]->debugfs);
	}

	debugfs_remove(hsotg->debug_file);
	debugfs_remove(hsotg->debug_testmode);
	debugfs_remove(hsotg->debug_fifo);
	debugfs_remove(hsotg->debug_root);
}
#ifdef CONFIG_OF
static void s3c_hsotg_of_probe(struct dwc2_hsotg *hsotg)
{
	struct device_node *np = hsotg->dev->of_node;
	u32 len = 0;
	u32 i = 0;

	/* Enable dma if requested in device tree */
	hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");

	/*
	 * Register TX periodic fifo size per endpoint.
	 * EP0 is excluded since it has no fifo configuration.
	 */
	if (!of_find_property(np, "g-tx-fifo-size", &len))
		goto rx_fifo;

	len /= sizeof(u32);

	/* Read tx fifo sizes other than ep0 */
	if (of_property_read_u32_array(np, "g-tx-fifo-size",
				       &hsotg->g_tx_fifo_sz[1], len))
		goto rx_fifo;

	/* Add ep0 */
	len++;

	/* Make remaining TX fifos unavailable */
	if (len < MAX_EPS_CHANNELS) {
		for (i = len; i < MAX_EPS_CHANNELS; i++)
			hsotg->g_tx_fifo_sz[i] = 0;
	}

rx_fifo:
	/* Register RX fifo size */
	of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);

	/* Register NPTX fifo size */
	of_property_read_u32(np, "g-np-tx-fifo-size",
			     &hsotg->g_np_g_tx_fifo_sz);
}
#else
static inline void s3c_hsotg_of_probe(struct dwc2_hsotg *hsotg) { }
#endif
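
/*
 * Example device-tree fragment exercising the properties read above; the
 * node name, unit address and fifo sizes are illustrative only and are not
 * taken from this driver:
 *
 *	usb@12480000 {
 *		compatible = "samsung,s3c6400-hsotg";
 *		g-use-dma;
 *		g-rx-fifo-size = <2048>;
 *		g-np-tx-fifo-size = <1024>;
 *		g-tx-fifo-size = <256 256 256 256>;
 *	};
 */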
/**
 * dwc2_gadget_init - init function for gadget
 * @hsotg: The data structure for the DWC2 driver.
 * @irq: The IRQ number for the controller.
 */
int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
{
	struct device *dev = hsotg->dev;
	struct s3c_hsotg_plat *plat = dev->platform_data;
	int epnum;
	int ret;
	int i;
	u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;

	/* Set default UTMI width */
	hsotg->phyif = GUSBCFG_PHYIF16;

	s3c_hsotg_of_probe(hsotg);

	/* Initialize to legacy fifo configuration values */
	hsotg->g_rx_fifo_sz = 2048;
	hsotg->g_np_g_tx_fifo_sz = 1024;
	memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
	/* Device tree specific probe */
	s3c_hsotg_of_probe(hsotg);
	/* Dump fifo information */
	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
		hsotg->g_np_g_tx_fifo_sz);
	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
			hsotg->g_tx_fifo_sz[i]);

	/*
	 * If platform probe couldn't find a generic PHY or an old style
	 * USB PHY, fall back to pdata
	 */
	if (IS_ERR_OR_NULL(hsotg->phy) && IS_ERR_OR_NULL(hsotg->uphy)) {
		plat = dev_get_platdata(dev);
		if (!plat) {
			dev_err(dev,
				"no platform data or transceiver defined\n");
			return -EPROBE_DEFER;
		}
		hsotg->plat = plat;
	} else if (hsotg->phy) {
		/*
		 * If using the generic PHY framework, check if the PHY bus
		 * width is 8-bit and set the phyif appropriately.
		 */
		if (phy_get_bus_width(hsotg->phy) == 8)
			hsotg->phyif = GUSBCFG_PHYIF8;
	}

	hsotg->clk = devm_clk_get(dev, "otg");
	if (IS_ERR(hsotg->clk)) {
		hsotg->clk = NULL;
		dev_dbg(dev, "cannot get otg clock\n");
	}

	hsotg->gadget.max_speed = USB_SPEED_HIGH;
	hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
	hsotg->gadget.name = dev_name(dev);

	/* reset the system */

	ret = clk_prepare_enable(hsotg->clk);
	if (ret) {
		dev_err(dev, "failed to enable otg clk\n");
		goto err_clk;
	}

	/* regulators */

	for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
		hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
				      hsotg->supplies);
	if (ret) {
		dev_err(dev, "failed to request supplies: %d\n", ret);
		goto err_clk;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
				    hsotg->supplies);
	if (ret) {
		dev_err(dev, "failed to enable supplies: %d\n", ret);
		goto err_clk;
	}

	/* usb phy enable */
	s3c_hsotg_phy_enable(hsotg);

	/*
	 * Force Device mode before initialization.
	 * This allows correctly configuring fifo for device mode.
	 */
	__bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEHOSTMODE);
	__orr32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE);

	/*
	 * According to Synopsys databook, this sleep is needed for the force
	 * device mode to take effect.
	 */
	msleep(25);

	s3c_hsotg_corereset(hsotg);
	ret = s3c_hsotg_hw_cfg(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
		goto err_clk;
	}

	s3c_hsotg_init(hsotg);

	/* Switch back to default configuration */
	__bic32(hsotg->regs + GUSBCFG, GUSBCFG_FORCEDEVMODE);

	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
					DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ctrl_buff) {
		dev_err(dev, "failed to allocate ctrl request buff\n");
		ret = -ENOMEM;
		goto err_supplies;
	}

	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
				       DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ep0_buff) {
		dev_err(dev, "failed to allocate ctrl reply buff\n");
		ret = -ENOMEM;
		goto err_supplies;
	}

	ret = devm_request_irq(hsotg->dev, irq, s3c_hsotg_irq, IRQF_SHARED,
			       dev_name(hsotg->dev), hsotg);
	if (ret < 0) {
		s3c_hsotg_phy_disable(hsotg);
		clk_disable_unprepare(hsotg->clk);
		regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
				       hsotg->supplies);
		dev_err(dev, "cannot claim IRQ for gadget\n");
		goto err_supplies;
	}

	/* hsotg->num_of_eps holds number of EPs other than ep0 */

	if (hsotg->num_of_eps == 0) {
		dev_err(dev, "wrong number of EPs (zero)\n");
		ret = -EINVAL;
		goto err_supplies;
	}

	/* setup endpoint information */

	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;

	/* allocate EP0 request */

	hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
						     GFP_KERNEL);
	if (!hsotg->ctrl_req) {
		dev_err(dev, "failed to allocate ctrl req\n");
		ret = -ENOMEM;
		goto err_supplies;
	}

	/* initialise the endpoints now the core has been initialised */
	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
		if (hsotg->eps_in[epnum])
			s3c_hsotg_initep(hsotg, hsotg->eps_in[epnum],
					 epnum, 1);
		if (hsotg->eps_out[epnum])
			s3c_hsotg_initep(hsotg, hsotg->eps_out[epnum],
					 epnum, 0);
	}

	/* disable power and clock */
	s3c_hsotg_phy_disable(hsotg);

	ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
				     hsotg->supplies);
	if (ret) {
		dev_err(dev, "failed to disable supplies: %d\n", ret);
		goto err_supplies;
	}

	ret = usb_add_gadget_udc(dev, &hsotg->gadget);
	if (ret)
		goto err_supplies;

	s3c_hsotg_create_debug(hsotg);

	s3c_hsotg_dump(hsotg);

	return 0;

err_supplies:
	s3c_hsotg_phy_disable(hsotg);
err_clk:
	clk_disable_unprepare(hsotg->clk);

	return ret;
}
EXPORT_SYMBOL_GPL(dwc2_gadget_init);
/**
 * s3c_hsotg_remove - remove function for hsotg driver
 * @hsotg: The device state
 */
int s3c_hsotg_remove(struct dwc2_hsotg *hsotg)
{
	usb_del_gadget_udc(&hsotg->gadget);
	s3c_hsotg_delete_debug(hsotg);
	clk_disable_unprepare(hsotg->clk);

	return 0;
}
EXPORT_SYMBOL_GPL(s3c_hsotg_remove);
int s3c_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	int ret = 0;

	mutex_lock(&hsotg->init_mutex);

	if (hsotg->driver) {
		int ep;

		dev_info(hsotg->dev, "suspending usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		if (hsotg->enabled)
			s3c_hsotg_core_disconnect(hsotg);
		s3c_hsotg_disconnect(hsotg);
		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
		spin_unlock_irqrestore(&hsotg->lock, flags);

		s3c_hsotg_phy_disable(hsotg);

		for (ep = 0; ep < hsotg->num_of_eps; ep++) {
			if (hsotg->eps_in[ep])
				s3c_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
			if (hsotg->eps_out[ep])
				s3c_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
		}

		ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
					     hsotg->supplies);
		clk_disable(hsotg->clk);
	}

	mutex_unlock(&hsotg->init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(s3c_hsotg_suspend);
int s3c_hsotg_resume(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;
	int ret = 0;

	mutex_lock(&hsotg->init_mutex);

	if (hsotg->driver) {
		dev_info(hsotg->dev, "resuming usb gadget %s\n",
			 hsotg->driver->driver.name);

		clk_enable(hsotg->clk);
		ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
					    hsotg->supplies);

		s3c_hsotg_phy_enable(hsotg);

		spin_lock_irqsave(&hsotg->lock, flags);
		s3c_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled)
			s3c_hsotg_core_connect(hsotg);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}
	mutex_unlock(&hsotg->init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(s3c_hsotg_resume);