// SPDX-License-Identifier: GPL-2.0
/*
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

#define DWC3_ALIGN_FRAME(d, n)	(((d)->frame_number + ((d)->interval * (n))) \
					& ~((d)->interval - 1))

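/*
 * Illustrative example (values made up, not from the databook): with an
 * isochronous endpoint interval of 8 (micro)frames, a current frame_number
 * of 0x1234 and n = 1, DWC3_ALIGN_FRAME() evaluates to
 *
 *	(0x1234 + 8 * 1) & ~(8 - 1) = 0x123c & ~0x7 = 0x1238
 *
 * i.e. the next interval-aligned frame at least one interval in the future.
 * This only works because dep->interval is a power of two.
 */
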
/**
 * dwc3_gadget_set_test_mode - enables usb2 test modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will return 0 on
 * success or -EINVAL if wrong Test Selector is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	return 0;
}

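/*
 * Illustrative note: the test selector is the value the host sends in a
 * SET_FEATURE(TEST_MODE) request. Assuming the selector values from ch9.h
 * (USB_TEST_J = 1 ... USB_TEST_FORCE_ENABLE = 5), a request for
 * USB_TEST_PACKET (4) ends up as 4 << 1 = 0x8 in the DCTL.TSTCTRL field
 * written above.
 */
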
/**
 * dwc3_gadget_get_link_state - gets current state of usb link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - sets usb link to a particular state
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set no action before sending new link state change */
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions.
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	return -ETIMEDOUT;
}

/**
 * dwc3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning. The
 * link TRB is always at the last TRB entry.
 */
static void dwc3_ep_inc_trb(u8 *index)
{
	(*index)++;
	if (*index == (DWC3_TRB_NUM - 1))
		*index = 0;
}

/**
 * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
 * @dep: The endpoint whose enqueue pointer we're incrementing
 */
static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_enqueue);
}

/**
 * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
 * @dep: The endpoint whose dequeue pointer we're incrementing
 */
static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
	dwc3_ep_inc_trb(&dep->trb_dequeue);
}

static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
		struct dwc3_request *req, int status)
{
	struct dwc3		*dwc = dep->dwc;

	list_del(&req->list);
	req->remaining = 0;
	req->needs_extra_trb = false;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (req->trb)
		usb_gadget_unmap_request_by_dev(dwc->sysdev,
						&req->request, req->direction);

	req->trb = NULL;
	trace_dwc3_gadget_giveback(req);

	if (dep->number > 1)
		pm_runtime_put(dwc->dev);
}

/**
 * dwc3_gadget_giveback - call struct usb_request's ->complete callback
 * @dep: The endpoint to whom the request belongs to
 * @req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;

	dwc3_gadget_del_and_unmap_request(dep, req, status);
	req->status = DWC3_REQUEST_STATUS_COMPLETED;

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

/**
 * dwc3_send_gadget_generic_command - issue a generic command for the controller
 * @dwc: pointer to the controller context
 * @cmd: the command to be issued
 * @param: command parameter
 *
 * Caller should take care of locking. Issue @cmd with a given @param to @dwc
 * and wait for its completion.
 */
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32		timeout = 500;
	int		status = 0;
	int		ret = 0;
	u32		reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			status = DWC3_DGCMD_STATUS(reg);
			if (status)
				ret = -EINVAL;
			break;
		}
	} while (--timeout);

	if (!timeout) {
		ret = -ETIMEDOUT;
		status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_generic_cmd(cmd, param, status);

	return ret;
}

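/*
 * Sketch of a caller, for illustration only (the hibernation scratchpad
 * setup elsewhere in this driver issues a very similar call). The caller is
 * expected to hold dwc->lock; scratch_addr here is an assumed local DMA
 * address, not a real field name:
 *
 *	ret = dwc3_send_gadget_generic_command(dwc,
 *			DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO,
 *			lower_32_bits(scratch_addr));
 *	if (ret < 0)
 *		return ret;
 */
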
static int __dwc3_gadget_wakeup(struct dwc3 *dwc);

/**
 * dwc3_send_gadget_ep_cmd - issue an endpoint command
 * @dep: the endpoint to which the command is going to be issued
 * @cmd: the command to be issued
 * @params: parameters to the command
 *
 * Caller should handle locking. This function will issue @cmd with given
 * @params to @dep and wait for its completion.
 */
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
		struct dwc3_gadget_ep_cmd_params *params)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3		*dwc = dep->dwc;
	u32			timeout = 5000;
	u32			saved_config = 0;
	u32			reg;

	int			cmd_status = 0;
	int			ret = -EINVAL;

	/*
	 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
	 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
	 * endpoint command.
	 *
	 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
	 * settings. Restore them after the command is completed.
	 *
	 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
	 */
	if (dwc->gadget.speed <= USB_SPEED_HIGH) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
			saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
		}

		if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
			saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
		}

		if (saved_config)
			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		int		needs_wakeup;

		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
				dwc->link_state == DWC3_LINK_STATE_U2 ||
				dwc->link_state == DWC3_LINK_STATE_U3);

		if (unlikely(needs_wakeup)) {
			ret = __dwc3_gadget_wakeup(dwc);
			dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
					ret);
		}
	}

	dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
	dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);

	/*
	 * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
	 * not relying on XferNotReady, we can make use of a special "No
	 * Response Update Transfer" command where we should clear both CmdAct
	 * and CmdIOC bits.
	 *
	 * With this, we don't need to wait for command completion and can
	 * straight away issue further commands to the endpoint.
	 *
	 * NOTICE: We're making an assumption that control endpoints will never
	 * make use of Update Transfer command. This is a safe assumption
	 * because we can never have more than one request at a time with
	 * Control Endpoints. If anybody changes that assumption, this chunk
	 * needs to be updated accordingly.
	 */
	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
			!usb_endpoint_xfer_isoc(desc))
		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
	else
		cmd |= DWC3_DEPCMD_CMDACT;

	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
	do {
		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			cmd_status = DWC3_DEPCMD_STATUS(reg);

			switch (cmd_status) {
			case 0:
				ret = 0;
				break;
			case DEPEVT_TRANSFER_NO_RESOURCE:
				ret = -EINVAL;
				break;
			case DEPEVT_TRANSFER_BUS_EXPIRY:
				/*
				 * SW issues START TRANSFER command to
				 * isochronous ep with future frame interval. If
				 * future interval time has already passed when
				 * core receives the command, it will respond
				 * with an error status of 'Bus Expiry'.
				 *
				 * Instead of always returning -EINVAL, let's
				 * give a hint to the gadget driver that this is
				 * the case by returning -EAGAIN.
				 */
				ret = -EAGAIN;
				break;
			default:
				dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
			}

			break;
		}
	} while (--timeout);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		cmd_status = -ETIMEDOUT;
	}

	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);

	if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
		dep->flags |= DWC3_EP_TRANSFER_STARTED;
		dwc3_gadget_ep_get_transfer_index(dep);
	}

	if (saved_config) {
		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
		reg |= saved_config;
		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
	}

	return ret;
}

static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd = DWC3_DEPCMD_CLEARSTALL;

	/*
	 * As of core revision 2.60a the recommended programming model
	 * is to set the ClearPendIN bit when issuing a Clear Stall EP
	 * command for IN endpoints. This is to prevent an issue where
	 * some (non-compliant) hosts may not send ACK TPs for pending
	 * IN transfers due to a mishandled error condition, tracked by a
	 * Synopsys STAR.
	 */
	if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
	    (dwc->gadget.speed >= USB_SPEED_SUPER))
		cmd |= DWC3_DEPCMD_CLEARPENDIN;

	memset(&params, 0, sizeof(params));

	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
			&params);
}

/**
 * dwc3_gadget_start_config - configure ep resources
 * @dep: endpoint that is being enabled
 *
 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
 * completion, it will set Transfer Resource for all available endpoints.
 *
 * The assignment of transfer resources cannot perfectly follow the data book
 * due to the fact that the controller driver does not have all knowledge of the
 * configuration in advance. It is given this information piecemeal by the
 * composite gadget framework after every SET_CONFIGURATION and
 * SET_INTERFACE. Trying to follow the databook programming model in this
 * scenario can cause errors, for two reasons:
 *
 * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
 * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
 * incorrect in the scenario of multiple interfaces.
 *
 * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
 * endpoint on alt setting (8.1.6).
 *
 * The following simplified method is used instead:
 *
 * All hardware endpoints can be assigned a transfer resource and this setting
 * will stay persistent until either a core reset or hibernation. So whenever we
 * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
 * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
 * guaranteed that there are as many transfer resources as endpoints.
 *
 * This function is called for each endpoint when it is being enabled but is
 * triggered only when called for EP0-out, which always happens first, and which
 * should only happen in one of the above conditions.
 */
static int dwc3_gadget_start_config(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3		*dwc;
	u32			cmd;
	int			i;
	int			ret;

	if (dep->number)
		return 0;

	memset(&params, 0x00, sizeof(params));
	cmd = DWC3_DEPCMD_DEPSTARTCFG;
	dwc = dep->dwc;

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret)
		return ret;

	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dep = dwc->eps[i];

		if (!dep)
			continue;

		ret = dwc3_gadget_set_xfer_resource(dep);
		if (ret)
			return ret;
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_ss_ep_comp_descriptor *comp_desc;
	const struct usb_endpoint_descriptor *desc;
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3 *dwc = dep->dwc;

	comp_desc = dep->endpoint.comp_desc;
	desc = dep->endpoint.desc;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed >= USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
	}

	params.param0 |= action;
	if (action == DWC3_DEPCFG_ACTION_RESTORE)
		params.param2 |= dep->saved_state;

	if (usb_endpoint_xfer_control(desc))
		params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;

	if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more.
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}

/**
 * __dwc3_gadget_ep_enable - initializes a hw endpoint
 * @dep: endpoint to be initialized
 * @action: one of INIT, MODIFY or RESTORE
 *
 * Caller should take care of locking. Execute all necessary commands to
 * initialize a HW endpoint so it can be used by a gadget driver.
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
	struct dwc3		*dwc = dep->dwc;

	u32			reg;
	int			ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dep, action);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (usb_endpoint_xfer_control(desc))
			goto out;

		/* Initialize the TRB ring */
		dep->trb_dequeue = 0;
		dep->trb_enqueue = 0;
		memset(dep->trb_pool, 0,
		       sizeof(struct dwc3_trb) * DWC3_TRB_NUM);

		/* Link TRB. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	/*
	 * Issue StartTransfer here with no-op TRB so we can always rely on No
	 * Response Update Transfer command.
	 */
	if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
			usb_endpoint_xfer_int(desc)) {
		struct dwc3_gadget_ep_cmd_params params;
		struct dwc3_trb	*trb;
		dma_addr_t trb_dma;
		u32 cmd;

		memset(&params, 0, sizeof(params));
		trb = &dep->trb_pool[0];
		trb_dma = dwc3_trb_dma_offset(dep, trb);

		params.param0 = upper_32_bits(trb_dma);
		params.param1 = lower_32_bits(trb_dma);

		cmd = DWC3_DEPCMD_STARTTRANSFER;

		ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
		if (ret < 0)
			return ret;
	}

out:
	trace_dwc3_gadget_ep_enable(dep);

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
		bool interrupt);

static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	dwc3_stop_active_transfer(dep, true, false);

	/* - giveback all requests to gadget driver */
	while (!list_empty(&dep->started_list)) {
		req = next_request(&dep->started_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}

	while (!list_empty(&dep->pending_list)) {
		req = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}

	while (!list_empty(&dep->cancelled_list)) {
		req = next_request(&dep->cancelled_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - disables a hw endpoint
 * @dep: the endpoint to disable
 *
 * This function undoes what __dwc3_gadget_ep_enable did and also removes
 * requests which are currently being processed by the hardware and those which
 * are not yet scheduled.
 *
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	trace_dwc3_gadget_ep_disable(dep);

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->type = 0;
	dep->flags = 0;

	/* Clear out the ep descriptors for non-ep0 */
	if (dep->number > 1) {
		dep->endpoint.comp_desc = NULL;
		dep->endpoint.desc = NULL;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
					"%s is already enabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
					"%s is already disabled\n",
					dep->name))
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->direction	= dep->direction;
	req->epnum	= dep->number;
	req->dep	= dep;
	req->status	= DWC3_REQUEST_STATUS_UNKNOWN;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_ep_prev_trb - returns the previous TRB in the ring
 * @dep: The endpoint with the TRB ring
 * @index: The index of the current TRB in the ring
 *
 * Returns the TRB prior to the one pointed to by the index. If the
 * index is 0, we will wrap backwards, skip the link TRB, and return
 * the one just before that.
 */
static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
	u8 tmp = index;

	if (!tmp)
		tmp = DWC3_TRB_NUM - 1;

	return &dep->trb_pool[tmp - 1];
}

static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{
	struct dwc3_trb		*tmp;
	u8			trbs_left;

	/*
	 * If enqueue & dequeue are equal then it is either full or empty.
	 *
	 * One way to know for sure is if the TRB right before us has HWO bit
	 * set or not. If it has, then we're definitely full and can't fit any
	 * more transfers in our ring.
	 */
	if (dep->trb_enqueue == dep->trb_dequeue) {
		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
			return 0;

		return DWC3_TRB_NUM - 1;
	}

	trbs_left = dep->trb_dequeue - dep->trb_enqueue;
	trbs_left &= (DWC3_TRB_NUM - 1);

	if (dep->trb_dequeue < dep->trb_enqueue)
		trbs_left--;

	return trbs_left;
}

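/*
 * Worked example for the arithmetic above (numbers chosen for illustration):
 * with DWC3_TRB_NUM = 256, trb_dequeue = 3 and trb_enqueue = 7,
 *
 *	trbs_left = (3 - 7) & 255 = 252
 *
 * and, because dequeue < enqueue, the free region wraps over the link TRB
 * at the end of the ring, so one more slot is subtracted, leaving 251
 * usable TRBs.
 */
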
static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
		dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
		unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
{
	struct dwc3		*dwc = dep->dwc;
	struct usb_gadget	*gadget = &dwc->gadget;
	enum usb_device_speed	speed = gadget->speed;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node) {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;

			/*
			 * USB Specification 2.0 Section 5.9.2 states that: "If
			 * there is only a single transaction in the microframe,
			 * only a DATA0 data packet PID is used. If there are
			 * two transactions per microframe, DATA1 is used for
			 * the first transaction data packet and DATA0 is used
			 * for the second transaction data packet. If there are
			 * three transactions per microframe, DATA2 is used for
			 * the first transaction data packet, DATA1 is used for
			 * the second, and DATA0 is used for the third."
			 *
			 * IOW, we should satisfy the following cases:
			 *
			 * 1) length <= maxpacket
			 *	- DATA0
			 *
			 * 2) maxpacket < length <= (2 * maxpacket)
			 *	- DATA1, DATA0
			 *
			 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
			 *	- DATA2, DATA1, DATA0
			 */
			if (speed == USB_SPEED_HIGH) {
				struct usb_ep *ep = &dep->endpoint;
				unsigned int mult = 2;
				unsigned int maxp = usb_endpoint_maxp(ep->desc);

				if (length <= (2 * maxp))
					mult--;

				if (length <= maxp)
					mult--;

				trb->size |= DWC3_TRB_SIZE_PCM1(mult);
			}
		} else {
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		}

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
				usb_endpoint_type(dep->endpoint.desc));
	}

	/*
	 * Enable Continue on Short Packet
	 * when the endpoint is not stream capable.
	 */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		if (!dep->stream_capable)
			trb->ctrl |= DWC3_TRB_CTRL_CSP;

		if (short_not_ok)
			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
	}

	if ((!no_interrupt && !chain) ||
			(dwc3_calc_trbs_left(dep) == 1))
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_ep_inc_enq(dep);

	trace_dwc3_prepare_trb(dep, trb);
}

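/*
 * Worked example for the high-speed isochronous PCM1/mult logic above
 * (values chosen for illustration): with maxp = 1024 and a request length
 * of 2500 bytes, length > 2 * maxp, so mult stays at 2 and the TRB asks for
 * three transactions per microframe (DATA2, DATA1, DATA0). A 1500-byte
 * request would drop mult to 1 (two transactions), and a 512-byte request
 * would drop it to 0 (a single DATA0 transaction).
 */
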
/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 * @chain: should this TRB be chained to the next?
 * @node: only for isochronous endpoints. First TRB needs different type.
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, unsigned chain, unsigned node)
{
	struct dwc3_trb		*trb;
	unsigned int		length;
	dma_addr_t		dma;
	unsigned		stream_id = req->request.stream_id;
	unsigned		short_not_ok = req->request.short_not_ok;
	unsigned		no_interrupt = req->request.no_interrupt;

	if (req->request.num_sgs > 0) {
		length = sg_dma_len(req->start_sg);
		dma = sg_dma_address(req->start_sg);
	} else {
		length = req->request.length;
		dma = req->request.dma;
	}

	trb = &dep->trb_pool[dep->trb_enqueue];

	if (!req->trb) {
		dwc3_gadget_move_started_request(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
	}

	req->num_trbs++;

	__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
			stream_id, short_not_ok, no_interrupt);
}

static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct scatterlist *sg = req->start_sg;
	struct scatterlist *s;
	int		i;
	unsigned int remaining = req->request.num_mapped_sgs
		- req->num_queued_sgs;

	for_each_sg(sg, s, remaining, i) {
		unsigned int length = req->request.length;
		unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
		unsigned int rem = length % maxp;
		unsigned chain = true;

		if (sg_is_last(s))
			chain = false;

		if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
			struct dwc3	*dwc = dep->dwc;
			struct dwc3_trb	*trb;

			req->needs_extra_trb = true;

			/* prepare normal TRB */
			dwc3_prepare_one_trb(dep, req, true, i);

			/* Now prepare one extra TRB to align transfer size */
			trb = &dep->trb_pool[dep->trb_enqueue];
			req->num_trbs++;
			__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
					maxp - rem, false, 1,
					req->request.stream_id,
					req->request.short_not_ok,
					req->request.no_interrupt);
		} else {
			dwc3_prepare_one_trb(dep, req, chain, i);
		}

		/*
		 * There can be a situation where all sgs in sglist are not
		 * queued because of insufficient trb number. To handle this
		 * case, update start_sg to next sg to be queued, so that
		 * we have free trbs we can continue queuing from where we
		 * previously stopped.
		 */
		if (chain)
			req->start_sg = sg_next(s);

		req->num_queued_sgs++;

		if (!dwc3_calc_trbs_left(dep))
			break;
	}
}

static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	unsigned int length = req->request.length;
	unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
	unsigned int rem = length % maxp;

	if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
		struct dwc3	*dwc = dep->dwc;
		struct dwc3_trb	*trb;

		req->needs_extra_trb = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to align transfer size */
		trb = &dep->trb_pool[dep->trb_enqueue];
		req->num_trbs++;
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
				false, 1, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else if (req->request.zero && req->request.length &&
		   (IS_ALIGNED(req->request.length, maxp))) {
		struct dwc3	*dwc = dep->dwc;
		struct dwc3_trb	*trb;

		req->needs_extra_trb = true;

		/* prepare normal TRB */
		dwc3_prepare_one_trb(dep, req, true, 0);

		/* Now prepare one extra TRB to handle ZLP */
		trb = &dep->trb_pool[dep->trb_enqueue];
		req->num_trbs++;
		__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
				false, 1, req->request.stream_id,
				req->request.short_not_ok,
				req->request.no_interrupt);
	} else {
		dwc3_prepare_one_trb(dep, req, false, 0);
	}
}

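/*
 * Illustrative numbers for the bounce-TRB path above: with maxp = 512 and
 * an OUT request of length = 600, rem = 600 % 512 = 88, so the request gets
 * its normal TRB plus an extra TRB pointing at dwc->bounce_addr of size
 * maxp - rem = 424 bytes, rounding the total receive size up to a multiple
 * of the endpoint's wMaxPacketSize as the controller requires for OUT
 * transfers.
 */
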
/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep)
{
	struct dwc3_request *req, *n;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/*
	 * We can get in a situation where there's a request in the started list
	 * but there weren't enough TRBs to fully kick it in the first time
	 * around, so it has been waiting for more TRBs to be freed up.
	 *
	 * In that case, we should check if we have a request with pending_sgs
	 * in the started list and prepare TRBs for that request first,
	 * otherwise we will prepare TRBs completely out of order and that will
	 * break things.
	 */
	list_for_each_entry(req, &dep->started_list, list) {
		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}

	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
		struct dwc3 *dwc = dep->dwc;
		int ret;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
						    dep->direction);
		if (ret)
			return;

		req->sg			= req->request.sg;
		req->start_sg		= req->sg;
		req->num_queued_sgs	= 0;
		req->num_pending_sgs	= req->request.num_mapped_sgs;

		if (req->num_pending_sgs > 0)
			dwc3_prepare_one_trb_sg(dep, req);
		else
			dwc3_prepare_one_trb_linear(dep, req);

		if (!dwc3_calc_trbs_left(dep))
			return;
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	int				starting;
	int				ret;
	u32				cmd;

	if (!dwc3_calc_trbs_left(dep))
		return 0;

	starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);

	dwc3_prepare_trbs(dep);
	req = next_request(&dep->started_list);
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (starting) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;

		if (dep->stream_capable)
			cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);

		if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
			cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER |
			DWC3_DEPCMD_PARAM(dep->resource_index);
	}

	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	if (ret < 0) {
		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		if (req->trb)
			memset(req->trb, 0, sizeof(struct dwc3_trb));
		dwc3_gadget_del_and_unmap_request(dep, req, ret);
		return ret;
	}

	return 0;
}

static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

/**
 * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
 * @dep: isoc endpoint
 *
 * This function tests for the correct combination of BIT[15:14] from the 16-bit
 * microframe number reported by the XferNotReady event for the future frame
 * number to start the isoc transfer.
 *
 * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
 * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
 * XferNotReady event are invalid. The driver uses this number to schedule the
 * isochronous transfer and passes it to the START TRANSFER command. Because
 * this number is invalid, the command may fail. If BIT[15:14] matches the
 * internal 16-bit microframe, the START TRANSFER command will pass and the
 * transfer will start at the scheduled time; if it is off by 1, the command
 * will still pass, but the transfer will start 2 seconds in the future. For all
 * other conditions, the START TRANSFER command will fail with bus-expiry.
 *
 * In order to workaround this issue, we can test for the correct combination of
 * BIT[15:14] by sending START TRANSFER commands with different values of
 * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframes apart
 * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
 * As a result, within the 4 possible combinations for BIT[15:14], there will
 * be 2 successful and 2 failing START TRANSFER command statuses. One of the 2
 * successful command statuses will result in a 2-second delayed start. The
 * smaller BIT[15:14] value is the correct combination.
 *
 * Since there are only 4 outcomes and the results are ordered, we can simply
 * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
 * deduce the smaller successful combination.
 *
 * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
 * of BIT[15:14]. The correct combination is as follows:
 *
 * if test0 fails and test1 passes, BIT[15:14] is 'b01
 * if test0 fails and test1 fails, BIT[15:14] is 'b10
 * if test0 passes and test1 fails, BIT[15:14] is 'b11
 * if test0 passes and test1 passes, BIT[15:14] is 'b00
 *
 * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
 */
static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
{
	int cmd_status = 0;
	bool test0;
	bool test1;

	while (dep->combo_num < 2) {
		struct dwc3_gadget_ep_cmd_params params;
		u32 test_frame_number;
		u32 cmd;

		/*
		 * Check if we can start isoc transfer on the next interval or
		 * 4 uframes in the future with BIT[15:14] as dep->combo_num
		 */
		test_frame_number = dep->frame_number & 0x3fff;
		test_frame_number |= dep->combo_num << 14;
		test_frame_number += max_t(u32, 4, dep->interval);

		params.param0 = upper_32_bits(dep->dwc->bounce_addr);
		params.param1 = lower_32_bits(dep->dwc->bounce_addr);

		cmd = DWC3_DEPCMD_STARTTRANSFER;
		cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
		cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);

		/* Redo if some other failure beside bus-expiry is received */
		if (cmd_status && cmd_status != -EAGAIN) {
			dep->start_cmd_status = 0;
			dep->combo_num = 0;
			return 0;
		}

		/* Store the first test status */
		if (dep->combo_num == 0)
			dep->start_cmd_status = cmd_status;

		dep->combo_num++;

		/*
		 * End the transfer if the START_TRANSFER command is successful
		 * to wait for the next XferNotReady to test the command again
		 */
		if (cmd_status == 0) {
			dwc3_stop_active_transfer(dep, true, true);
			return 0;
		}
	}

	/* test0 and test1 are both completed at this point */
	test0 = (dep->start_cmd_status == 0);
	test1 = (cmd_status == 0);

	if (!test0 && test1)
		dep->combo_num = 1;
	else if (!test0 && !test1)
		dep->combo_num = 2;
	else if (test0 && !test1)
		dep->combo_num = 3;
	else if (test0 && test1)
		dep->combo_num = 0;

	dep->frame_number &= 0x3fff;
	dep->frame_number |= dep->combo_num << 14;
	dep->frame_number += max_t(u32, 4, dep->interval);

	/* Reinitialize test variables */
	dep->start_cmd_status = 0;
	dep->combo_num = 0;

	return __dwc3_gadget_kick_transfer(dep);
}

static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int ret;
	int i;

	if (list_empty(&dep->pending_list)) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return -EAGAIN;
	}

	if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) &&
	    (dwc->revision <= DWC3_USB31_REVISION_160A ||
	     (dwc->revision == DWC3_USB31_REVISION_170A &&
	      dwc->version_type >= DWC31_VERSIONTYPE_EA01 &&
	      dwc->version_type <= DWC31_VERSIONTYPE_EA06))) {

		if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction)
			return dwc3_gadget_start_isoc_quirk(dep);
	}

	for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
		dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1);

		ret = __dwc3_gadget_kick_transfer(dep);
		if (ret != -EAGAIN)
			break;
	}

	return ret;
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;

	if (!dep->endpoint.desc) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		return -ESHUTDOWN;
	}

	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
				&req->request, req->dep->name))
		return -EINVAL;

	if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
				"%s: request %pK already in flight\n",
				dep->name, &req->request))
		return -EINVAL;

	pm_runtime_get(dwc->dev);

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;

	trace_dwc3_ep_queue(req);

	list_add_tail(&req->list, &dep->pending_list);
	req->status = DWC3_REQUEST_STATUS_QUEUED;

	/* Start the transfer only after the END_TRANSFER is completed */
	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
		dep->flags |= DWC3_EP_DELAY_START;
		return 0;
	}

	/*
	 * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
	 * wait for a XferNotReady event so we will know what's the current
	 * (micro-)frame number.
	 *
	 * Without this trick, we are very, very likely gonna get Bus Expiry
	 * errors which will force us issue EndTransfer command.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
				!(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return 0;

		if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
			if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
				return __dwc3_gadget_start_isoc(dep);
			}
		}
	}

	return __dwc3_gadget_kick_transfer(dep);
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
{
	int i;

	/*
	 * If request was already started, this means we had to
	 * stop the transfer. With that we also need to ignore
	 * all TRBs used by the request, however TRBs can only
	 * be modified after completion of END_TRANSFER
	 * command. So what we do here is that we wait for
	 * END_TRANSFER completion and only after that, we jump
	 * over TRBs by clearing HWO and incrementing dequeue
	 * pointer.
	 */
	for (i = 0; i < req->num_trbs; i++) {
		struct dwc3_trb *trb;

		trb = req->trb + i;
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		dwc3_ep_inc_deq(dep);
	}

	req->num_trbs = 0;
}

static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
{
	struct dwc3_request		*req;
	struct dwc3_request		*tmp;

	list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
		dwc3_gadget_ep_skip_trbs(dep, req);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	trace_dwc3_ep_dequeue(req);

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->pending_list, list) {
		if (r == req)
			goto out1;
	}

	list_for_each_entry(r, &dep->started_list, list) {
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dep, true, true);

			if (!r->trb)
				goto out0;

			dwc3_gadget_move_cancelled_request(req);
			if (dep->flags & DWC3_EP_TRANSFER_STARTED)
				goto out0;
			else
				goto out1;
		}
	}

	dev_err(dwc->dev, "request %pK was not queued to %s\n",
		request, ep->name);
	ret = -EINVAL;
	goto out0;

out1:
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		struct dwc3_trb *trb;

		unsigned transfer_in_flight;
		unsigned started;

		if (dep->number > 1)
			trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
		else
			trb = &dwc->ep0_trb[dep->trb_enqueue];

		transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
		started = !list_empty(&dep->started_list);

		if (!protocol && ((dep->direction && transfer_in_flight) ||
				(!dep->direction && started))) {
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
				&params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_clear_stall_ep_cmd(dep);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
	unsigned long			flags;
	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;

	if (dep->number == 0 || dep->number == 1)
		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
	else
		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);

	return __dwc3_gadget_get_frame(dwc);
}

static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
	int			retries;

	int			ret;
	u32			reg;

	u8			link_state;
	u8			speed;

	/*
	 * According to the Databook Remote wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if ((speed == DWC3_DSTS_SUPERSPEED) ||
	    (speed == DWC3_DSTS_SUPERSPEED_PLUS))
		return 0;

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		return -EINVAL;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		return ret;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	retries = 20000;

	while (retries--) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		return -EINVAL;
	}

	return 0;
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_wakeup(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
	u32			reg;
	u32			timeout = 500;

	if (pm_runtime_suspended(dwc->dev))
		return 0;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation)
			reg |= DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;

		if (dwc->has_hibernation && !suspend)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;

		dwc->pullups_connected = false;
	}

	dwc3_gadget_dctl_write_safe(dwc, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		reg &= DWC3_DSTS_DEVCTRLHLT;
	} while (--timeout && !(!is_on ^ !reg));

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

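/*
 * Clarifying example for the poll condition at the end of
 * dwc3_gadget_run_stop() above: the loop keeps spinning while
 * !(!is_on ^ !reg) is true, i.e. while DSTS.DEVCTRLHLT disagrees with the
 * requested direction. With is_on = 1 it spins until DEVCTRLHLT reads 0
 * (controller running); with is_on = 0 it spins until the bit reads 1
 * (controller halted), or until the 500-iteration timeout expires.
 */
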
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	is_on = !!is_on;

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 */
	if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0) {
			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
			return -ETIMEDOUT;
		}
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32			reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	if (dwc->revision < DWC3_REVISION_250A)
		reg |= DWC3_DEVTEN_ULSTCNGEN;

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);

/**
 * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
 * @dwc: pointer to our context structure
 *
 * The following looks complex but it's actually very simple. In order to
 * calculate the number of packets we can burst at once on OUT transfers, we're
 * gonna use RxFIFO size.
 *
 * To calculate RxFIFO size we need two numbers:
 * MDWIDTH = size, in bits, of the internal memory bus
 * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
 *
 * Given these two numbers, the formula is simple:
 *
 * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
 *
 * 24 bytes is for 3x SETUP packets
 * 16 bytes is a clock domain crossing tolerance
 *
 * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
 */
static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
	u32 ram2_depth;
	u32 mdwidth;
	u32 nump;
	u32 reg;

	ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);

	nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
	nump = min_t(u32, nump, 16);

	/* update NumP */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~DWC3_DCFG_NUMP_MASK;
	reg |= nump << DWC3_DCFG_NUMP_SHIFT;
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}

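/*
 * Worked example of the NUMP formula above (hypothetical hardware
 * parameters): with MDWIDTH = 64 bits and RAM2_DEPTH = 1024,
 *
 *	RxFIFO Size = (1024 * 64 / 8) - 24 - 16 = 8152 bytes
 *	NUMP = 8152 / 1024 = 7
 *
 * so the controller would advertise up to 7 packets in ACK TPs before
 * throttling, and the min_t() above caps the value at 16 in any case.
 */
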
static int __dwc3_gadget_start(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret = 0;
	u32			reg;

	/*
	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
	 * the core supports IMOD, disable it.
	 */
	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
	} else if (dwc3_has_imod(dwc)) {
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
	}

	/*
	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
	 * field instead of letting dwc3 itself calculate that automatically.
	 *
	 * This way, we maximize the chances that we'll be able to get several
	 * bursts of data without going through any sort of endpoint throttling.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
	if (dwc3_is_usb31(dwc))
		reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
	else
		reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;

	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);

	dwc3_gadget_setup_nump(dwc);

	/* Start with SuperSpeed Default */
	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err0;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		goto err1;
	}

	/* begin to receive SETUP packets */
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
	dwc3_ep0_out_start(dwc);

	dwc3_gadget_enable_irq(dwc);

	return 0;

err1:
	__dwc3_gadget_ep_disable(dwc->eps[0]);

err0:
	return ret;
}

static int dwc3_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret = 0;
	int			irq;

	irq = dwc->irq_gadget;
	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
			IRQF_SHARED, "dwc3", dwc->ev_buf);
	if (ret) {
		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
				irq, ret);
		goto err0;
	}

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->gadget_driver) {
		dev_err(dwc->dev, "%s is already bound to %s\n",
				dwc->gadget.name,
				dwc->gadget_driver->driver.name);
		ret = -EBUSY;
		goto err1;
	}

	dwc->gadget_driver	= driver;

	if (pm_runtime_active(dwc->dev))
		__dwc3_gadget_start(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&dwc->lock, flags);
	free_irq(irq, dwc->ev_buf);

err0:
	return ret;
}

static void __dwc3_gadget_stop(struct dwc3 *dwc)
{
	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);
}

static int dwc3_gadget_stop(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (pm_runtime_suspended(dwc->dev))
		goto out;

	__dwc3_gadget_stop(dwc);

out:
	dwc->gadget_driver	= NULL;
	spin_unlock_irqrestore(&dwc->lock, flags);

	free_irq(dwc->irq_gadget, dwc->ev_buf);

	return 0;
}

static void dwc3_gadget_config_params(struct usb_gadget *g,
				      struct usb_dcd_config_params *params)
{
	struct dwc3 *dwc = gadget_to_dwc(g);

	params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
	params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;

	/* Recommended BESL */
	if (!dwc->dis_enblslpm_quirk) {
		/*
		 * If the recommended BESL baseline is 0 or if the BESL deep is
		 * less than 2, Microsoft's Windows 10 host usb stack will issue
		 * a usb reset immediately after it receives the extended BOS
		 * descriptor and the enumeration will fail. To maintain
		 * compatibility with the Windows' usb stack, let's set the
		 * recommended BESL baseline to 1 and clamp the BESL deep to be
		 * at least 2.
		 */
		params->besl_baseline = 1;
		if (dwc->is_utmi_l1_suspend)
			params->besl_deep =
				clamp_t(u8, dwc->hird_threshold, 2, 15);
	}

	/* U1 Device exit Latency */
	if (dwc->dis_u1_entry_quirk)
		params->bU1devExitLat = 0;
	else
		params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;

	/* U2 Device exit Latency */
	if (dwc->dis_u2_entry_quirk)
		params->bU2DevExitLat = 0;
	else
		params->bU2DevExitLat =
			cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
}

static void dwc3_gadget_set_speed(struct usb_gadget *g,
				  enum usb_device_speed speed)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	u32			reg;

	spin_lock_irqsave(&dwc->lock, flags);
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_SPEED_MASK);

	/*
	 * WORKAROUND: DWC3 revision < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than the SuperSpeed.
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
	if (dwc->revision < DWC3_REVISION_220A &&
	    !dwc->dis_metastability_quirk) {
		reg |= DWC3_DCFG_SUPERSPEED;
	} else {
		switch (speed) {
		case USB_SPEED_LOW:
			reg |= DWC3_DCFG_LOWSPEED;
			break;
		case USB_SPEED_FULL:
			reg |= DWC3_DCFG_FULLSPEED;
			break;
		case USB_SPEED_HIGH:
			reg |= DWC3_DCFG_HIGHSPEED;
			break;
		case USB_SPEED_SUPER:
			reg |= DWC3_DCFG_SUPERSPEED;
			break;
		case USB_SPEED_SUPER_PLUS:
			if (dwc3_is_usb31(dwc))
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
			else
				reg |= DWC3_DCFG_SUPERSPEED;
			break;
		default:
			dev_err(dwc->dev, "invalid speed (%d)\n", speed);

			if (dwc->revision & DWC3_REVISION_IS_DWC31)
				reg |= DWC3_DCFG_SUPERSPEED_PLUS;
			else
				reg |= DWC3_DCFG_SUPERSPEED;
		}
	}
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	spin_unlock_irqrestore(&dwc->lock, flags);
}

static const struct usb_gadget_ops dwc3_gadget_ops = {
	.get_frame		= dwc3_gadget_get_frame,
	.wakeup			= dwc3_gadget_wakeup,
	.set_selfpowered	= dwc3_gadget_set_selfpowered,
	.pullup			= dwc3_gadget_pullup,
	.udc_start		= dwc3_gadget_start,
	.udc_stop		= dwc3_gadget_stop,
	.udc_set_speed		= dwc3_gadget_set_speed,
	.get_config_params	= dwc3_gadget_config_params,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
	dep->endpoint.maxburst = 1;
	dep->endpoint.ops = &dwc3_gadget_ep0_ops;
	if (!dep->direction)
		dwc->gadget.ep0 = &dep->endpoint;

	dep->endpoint.caps.type_control = true;

	return 0;
}

static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	int mdwidth;
	int kbytes;
	int size;

	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth /= 8;

	size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
	if (dwc3_is_usb31(dwc))
		size = DWC31_GTXFIFOSIZ_TXFDEF(size);
	else
		size = DWC3_GTXFIFOSIZ_TXFDEF(size);

	/* FIFO Depth is in MDWIDTH bytes. Multiply */
	size *= mdwidth;

	kbytes = size / 1024;
	if (kbytes == 0)
		kbytes = 1;

	/*
	 * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for
	 * internal overhead. We don't really know how these are used,
	 * but documentation says it exists.
	 */
	size -= mdwidth * (kbytes + 1);
	size /= kbytes;

	usb_ep_set_maxpacket_limit(&dep->endpoint, size);

	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

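/*
 * Worked example for the TX FIFO sizing above (hypothetical values): with a
 * 64-bit memory bus, mdwidth = 64 / 8 = 8 bytes. If GTXFIFOSIZ reports
 * TXFDEF = 130 mdwidth-words, then size = 130 * 8 = 1040 bytes, kbytes = 1,
 * and the overhead correction removes 8 * (1 + 1) = 16 bytes, leaving a
 * 1024-byte maxpacket limit for the endpoint.
 */
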
static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
	dep->endpoint.max_streams = 15;
	dep->endpoint.ops = &dwc3_gadget_ep_ops;
	list_add_tail(&dep->endpoint.ep_list,
			&dwc->gadget.ep_list);
	dep->endpoint.caps.type_iso = true;
	dep->endpoint.caps.type_bulk = true;
	dep->endpoint.caps.type_int = true;

	return dwc3_alloc_trb_pool(dep);
}

static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
{
	struct dwc3_ep			*dep;
	bool				direction = epnum & 1;
	int				ret;
	u8				num = epnum >> 1;

	dep = kzalloc(sizeof(*dep), GFP_KERNEL);
	if (!dep)
		return -ENOMEM;

	dep->dwc = dwc;
	dep->number = epnum;
	dep->direction = direction;
	dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
	dwc->eps[epnum] = dep;

	dep->start_cmd_status = 0;

	snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
			direction ? "in" : "out");

	dep->endpoint.name = dep->name;

	if (!(dep->number > 1)) {
		dep->endpoint.desc = &dwc3_gadget_ep0_desc;
		dep->endpoint.comp_desc = NULL;
	}

	if (num == 0)
		ret = dwc3_gadget_init_control_endpoint(dep);
	else if (direction)
		ret = dwc3_gadget_init_in_endpoint(dep);
	else
		ret = dwc3_gadget_init_out_endpoint(dep);

	if (ret)
		return ret;

	dep->endpoint.caps.dir_in = direction;
	dep->endpoint.caps.dir_out = !direction;

	INIT_LIST_HEAD(&dep->pending_list);
	INIT_LIST_HEAD(&dep->started_list);
	INIT_LIST_HEAD(&dep->cancelled_list);

	return 0;
}
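
/*
 * Note on naming: physical endpoint number "epnum" maps to logical endpoint
 * "epnum >> 1" with bit 0 selecting the direction, so the snprintf() above
 * names physical endpoint 4 "ep2out" and physical endpoint 5 "ep2in".
 */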
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
	u8				epnum;
	int				ret;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	for (epnum = 0; epnum < total; epnum++) {
		ret = dwc3_gadget_init_endpoint(dwc, epnum);
		if (ret)
			return ret;
	}

	return 0;
}

static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	u8				epnum;

	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		dep = dwc->eps[epnum];
		if (!dep)
			continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them to the endpoints list. Due to that,
		 * we shouldn't do these two operations otherwise we would end
		 * up with all sorts of bugs when removing dwc3.ko.
		 */
		if (epnum != 0 && epnum != 1) {
			dwc3_free_trb_pool(dep);
			list_del(&dep->endpoint.ep_list);
		}

		kfree(dep);
	}
}
/* -------------------------------------------------------------------------- */
static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status, int chain)
{
	unsigned int count;

	dwc3_ep_inc_deq(dep);

	trace_dwc3_complete_trb(dep, trb);

	/*
	 * If we're in the middle of a series of chained TRBs and we
	 * receive a short transfer along the way, DWC3 will skip
	 * through all TRBs including the last TRB in the chain (the
	 * one where the CHN bit is zero). DWC3 will also avoid clearing
	 * the HWO bit and SW has to do it manually.
	 *
	 * We're going to do that here to avoid problems of HW trying
	 * to use bogus TRBs for transfers.
	 */
	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;

	/*
	 * For isochronous transfers, the first TRB in a service interval must
	 * have the Isoc-First type. Track and report its interval frame number.
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
	    (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
		unsigned int frame_number;

		frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
		frame_number &= ~(dep->interval - 1);
		req->request.frame_number = frame_number;
	}

	/*
	 * If we're dealing with an unaligned size OUT transfer, we will be
	 * left with one TRB pending in the ring. We need to manually clear
	 * the HWO bit from that TRB.
	 */
	if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		return 1;
	}

	count = trb->size & DWC3_TRB_SIZE_MASK;
	req->remaining += count;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		return 1;

	if (event->status & DEPEVT_STATUS_SHORT && !chain)
		return 1;

	if (event->status & DEPEVT_STATUS_IOC)
		return 1;

	return 0;
}
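
/*
 * A non-zero return from dwc3_gadget_ep_reclaim_completed_trb() tells the
 * reclaim helpers below to stop walking TRBs for this request, e.g. when the
 * controller still owns a TRB (HWO set) or the event already reported an IOC
 * or short-packet completion.
 */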
static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
	struct scatterlist *sg = req->sg;
	struct scatterlist *s;
	unsigned int pending = req->num_pending_sgs;
	unsigned int i;
	int ret = 0;

	for_each_sg(sg, s, pending, i) {
		trb = &dep->trb_pool[dep->trb_dequeue];

		if (trb->ctrl & DWC3_TRB_CTRL_HWO)
			break;

		req->sg = sg_next(s);
		req->num_pending_sgs--;

		ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
				trb, event, status, true);
		if (ret)
			break;
	}

	return ret;
}

static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
		struct dwc3_request *req, const struct dwc3_event_depevt *event,
		int status)
{
	struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];

	return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
			event, status, false);
}

static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
	/*
	 * For OUT direction, host may send less than the setup
	 * length. Return true for all OUT requests.
	 */
	if (!req->direction)
		return true;

	return req->request.actual == req->request.length;
}
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event,
		struct dwc3_request *req, int status)
{
	int ret;

	if (req->num_pending_sgs)
		ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
				status);
	else
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);

	if (req->needs_extra_trb) {
		ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
				status);
		req->needs_extra_trb = false;
	}

	req->request.actual = req->request.length - req->remaining;

	if (!dwc3_gadget_ep_request_completed(req) ||
			req->num_pending_sgs) {
		__dwc3_gadget_kick_transfer(dep);
		goto out;
	}

	dwc3_gadget_giveback(dep, req, status);

out:
	return ret;
}

static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_request	*tmp;

	list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
		int ret;

		ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
				req, status);
		if (ret)
			break;
	}
}

static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dep->frame_number = event->parameters;
}
static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	struct dwc3 *dwc = dep->dwc;
	unsigned status = 0;
	bool stop = false;

	dwc3_gadget_endpoint_frame_from_event(dep, event);

	if (event->status & DEPEVT_STATUS_BUSERR)
		status = -ECONNRESET;

	if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
		status = -EXDEV;

		if (list_empty(&dep->started_list))
			stop = true;
	}

	dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);

	if (stop) {
		dwc3_stop_active_transfer(dep, true, true);
		dep->flags = DWC3_EP_ENABLED;
	}

	/*
	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		u32		reg;
		int		i;

		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
			dep = dwc->eps[i];

			if (!(dep->flags & DWC3_EP_ENABLED))
				continue;

			if (!list_empty(&dep->started_list))
				return;
		}

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg |= dwc->u1u2;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);

		dwc->u1u2 = 0;
	}
}

static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event)
{
	dwc3_gadget_endpoint_frame_from_event(dep, event);
	(void) __dwc3_gadget_start_isoc(dep);
}
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep;
	u8			epnum = event->endpoint_number;
	u8			cmd;

	dep = dwc->eps[epnum];

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
			return;

		/* Handle only EPCMDCMPLT when EP disabled */
		if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
			return;
	}

	if (epnum == 0 || epnum == 1) {
		dwc3_ep0_interrupt(dwc, event);
		return;
	}

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERINPROGRESS:
		dwc3_gadget_endpoint_transfer_in_progress(dep, event);
		break;
	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_gadget_endpoint_transfer_not_ready(dep, event);
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		cmd = DEPEVT_PARAMETER_CMD(event->parameters);

		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
			dwc3_gadget_ep_cleanup_cancelled_requests(dep);
			if ((dep->flags & DWC3_EP_DELAY_START) &&
			    !usb_endpoint_xfer_isoc(dep->endpoint.desc))
				__dwc3_gadget_kick_transfer(dep);

			dep->flags &= ~DWC3_EP_DELAY_START;
		}
		break;
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_XFERCOMPLETE:
	case DWC3_DEPEVT_RXTXFIFOEVT:
		break;
	}
}
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->disconnect(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->suspend(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_resume_gadget(struct dwc3 *dwc)
{
	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}

static void dwc3_reset_gadget(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return;

	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dwc->lock);
		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
		spin_lock(&dwc->lock);
	}
}
static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
	bool interrupt)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

	if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
	    (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
		return;

	/*
	 * NOTICE: We are violating what the Databook says about the
	 * EndTransfer command. Ideally we would _always_ wait for the
	 * EndTransfer Command Completion IRQ, but that's causing too
	 * much trouble synchronizing between us and gadget driver.
	 *
	 * We have discussed this with the IP Provider and it was
	 * suggested to giveback all requests here.
	 *
	 * Note also that a similar handling was tested by Synopsys
	 * (thanks a lot Paul) and nothing bad has come out of it.
	 * In short, what we're doing is issuing EndTransfer with
	 * CMDIOC bit set and delay kicking transfer until the
	 * EndTransfer command had completed.
	 *
	 * As of IP version 3.10a of the DWC_usb3 IP, the controller
	 * supports a mode to work around the above limitation. The
	 * software can poll the CMDACT bit in the DEPCMD register
	 * after issuing a EndTransfer command. This mode is enabled
	 * by writing GUCTL2[14]. This polling is already done in the
	 * dwc3_send_gadget_ep_cmd() function so if the mode is
	 * enabled, the EndTransfer command will have completed upon
	 * returning from this function.
	 *
	 * This mode is NOT available on the DWC_usb31 IP.
	 */

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
	cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;

	if (!interrupt)
		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	else
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
}
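
/*
 * Callers choose the force/interrupt arguments per situation; for example,
 * the missed-isoc path in dwc3_gadget_endpoint_transfer_in_progress() above
 * uses dwc3_stop_active_transfer(dep, true, true) so the transfer is
 * forcibly removed and an EndTransfer command completion interrupt is
 * requested.
 */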
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
{
	u32 epnum;

	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
		struct dwc3_ep *dep;
		int ret;

		dep = dwc->eps[epnum];
		if (!dep)
			continue;

		if (!(dep->flags & DWC3_EP_STALL))
			continue;

		dep->flags &= ~DWC3_EP_STALL;

		ret = dwc3_send_clear_stall_ep_cmd(dep);
		WARN_ON_ONCE(ret);
	}
}

static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
	int			reg;

	dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_INITU1ENA;
	reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_gadget_dctl_write_safe(dwc, reg);

	dwc3_disconnect_gadget(dwc);

	dwc->gadget.speed = USB_SPEED_UNKNOWN;
	dwc->setup_packet_pending = false;
	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);

	dwc->connected = false;
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
	u32			reg;

	dwc->connected = true;

	/*
	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
	 * would cause a missing Disconnect Event if there's a
	 * pending Setup Packet in the FIFO.
	 *
	 * There's no suggested workaround on the official Bug
	 * report, which states that "unless the driver/application
	 * is doing any special handling of a disconnect event,
	 * there is no functional issue".
	 *
	 * Unfortunately, it turns out that we _do_ some special
	 * handling of a disconnect event, namely complete all
	 * pending transfers, notify gadget driver of the
	 * disconnection, and so on.
	 *
	 * Our suggested workaround is to follow the Disconnect
	 * Event steps here, instead, based on a setup_packet_pending
	 * flag. Such flag gets set whenever we have a SETUP_PENDING
	 * status for EP0 TRBs and gets cleared on XferComplete for the
	 * same endpoint.
	 *
	 * Refers to:
	 *
	 * STAR#9000466709: RTL: Device : Disconnect event not
	 * generated if setup packet pending in FIFO
	 */
	if (dwc->revision < DWC3_REVISION_188A) {
		if (dwc->setup_packet_pending)
			dwc3_gadget_disconnect_interrupt(dwc);
	}

	dwc3_reset_gadget(dwc);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
	dwc3_gadget_dctl_write_safe(dwc, reg);
	dwc->test_mode = false;
	dwc3_clear_stall_all_ep(dwc);

	/* Reset device address to zero */
	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
}
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;
	int			ret;
	u32			reg;
	u8			speed;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	speed = reg & DWC3_DSTS_CONNECTSPD;
	dwc->speed = speed;

	/*
	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
	 * each time on Connect Done.
	 *
	 * Currently we always use the reset value. If any platform
	 * wants to set this to a different value, we need to add a
	 * setting and update GCTL.RAMCLKSEL here.
	 */

	switch (speed) {
	case DWC3_DSTS_SUPERSPEED_PLUS:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
		break;
	case DWC3_DSTS_SUPERSPEED:
		/*
		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
		 * would cause a missing USB3 Reset event.
		 *
		 * In such situations, we should force a USB3 Reset
		 * event by calling our dwc3_gadget_reset_interrupt()
		 * routine.
		 *
		 * Refers to:
		 *
		 * STAR#9000483510: RTL: SS : USB3 reset event may
		 * not be generated always when the link enters poll
		 */
		if (dwc->revision < DWC3_REVISION_190A)
			dwc3_gadget_reset_interrupt(dwc);

		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		dwc->gadget.ep0->maxpacket = 512;
		dwc->gadget.speed = USB_SPEED_SUPER;
		break;
	case DWC3_DSTS_HIGHSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_HIGH;
		break;
	case DWC3_DSTS_FULLSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		dwc->gadget.ep0->maxpacket = 64;
		dwc->gadget.speed = USB_SPEED_FULL;
		break;
	case DWC3_DSTS_LOWSPEED:
		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
		dwc->gadget.ep0->maxpacket = 8;
		dwc->gadget.speed = USB_SPEED_LOW;
		break;
	}

	dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;

	/* Enable USB2 LPM Capability */

	if ((dwc->revision > DWC3_REVISION_194A) &&
	    (speed != DWC3_DSTS_SUPERSPEED) &&
	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
		reg |= DWC3_DCFG_LPM_CAP;
		dwc3_writel(dwc->regs, DWC3_DCFG, reg);

		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);

		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
					    (dwc->is_utmi_l1_suspend << 4));

		/*
		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
		 * DCFG.LPMCap is set, core responses with an ACK and the
		 * BESL value in the LPM token is less than or equal to LPM
		 * NYET threshold.
		 */
		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
				&& dwc->has_lpm_erratum,
				"LPM Erratum not available on dwc3 revisions < 2.40a\n");

		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
			reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);

		dwc3_gadget_dctl_write_safe(dwc, reg);
	} else {
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
		dwc3_gadget_dctl_write_safe(dwc, reg);
	}

	dep = dwc->eps[0];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	dep = dwc->eps[1];
	ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
	if (ret) {
		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
		return;
	}

	/*
	 * Configure PHY via GUSB3PIPECTLn if required.
	 *
	 * Update GTXFIFOSIZn
	 *
	 * In both cases reset values should be sufficient.
	 */
}
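
/*
 * To summarize the speed handling above: ep0's wMaxPacketSize follows the
 * negotiated speed programmed by the switch statement, 512 bytes for
 * SuperSpeed and SuperSpeedPlus, 64 bytes for high and full speed, and
 * 8 bytes for low speed.
 */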
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
	/*
	 * TODO take core out of low power mode when that's
	 * implemented.
	 */

	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
		spin_unlock(&dwc->lock);
		dwc->gadget_driver->resume(&dwc->gadget);
		spin_lock(&dwc->lock);
	}
}
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
	unsigned int		pwropt;

	/*
	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
	 * Hibernation mode enabled which would show up when device detects
	 * host-initiated U3 exit.
	 *
	 * In that case, device will generate a Link State Change Interrupt
	 * from U3 to RESUME which is only necessary if Hibernation is
	 * configured in.
	 *
	 * There are no functional changes due to such spurious event and we
	 * just need to ignore it.
	 *
	 * Refers to:
	 *
	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
	 * operational mode
	 */
	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
	if ((dwc->revision < DWC3_REVISION_250A) &&
			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
				(next == DWC3_LINK_STATE_RESUME)) {
			return;
		}
	}

	/*
	 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
	 * on the link partner, the USB session might do multiple entry/exit
	 * of low power states before a transfer takes place.
	 *
	 * Due to this problem, we might experience lower throughput. The
	 * suggested workaround is to disable DCTL[12:9] bits if we're
	 * transitioning from U1/U2 to U0 and enable those bits again
	 * after a transfer completes and there are no pending transfers
	 * on any of the enabled endpoints.
	 *
	 * This is the first half of that workaround.
	 *
	 * Refers to:
	 *
	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
	 * core send LGO_Ux entering U0
	 */
	if (dwc->revision < DWC3_REVISION_183A) {
		if (next == DWC3_LINK_STATE_U0) {
			u32	u1u2;
			u32	reg;

			switch (dwc->link_state) {
			case DWC3_LINK_STATE_U1:
			case DWC3_LINK_STATE_U2:
				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
				u1u2 = reg & (DWC3_DCTL_INITU2ENA
						| DWC3_DCTL_ACCEPTU2ENA
						| DWC3_DCTL_INITU1ENA
						| DWC3_DCTL_ACCEPTU1ENA);

				if (!dwc->u1u2)
					dwc->u1u2 = reg & u1u2;

				reg &= ~u1u2;

				dwc3_gadget_dctl_write_safe(dwc, reg);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	switch (next) {
	case DWC3_LINK_STATE_U1:
		if (dwc->speed == USB_SPEED_SUPER)
			dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_U2:
	case DWC3_LINK_STATE_U3:
		dwc3_suspend_gadget(dwc);
		break;
	case DWC3_LINK_STATE_RESUME:
		dwc3_resume_gadget(dwc);
		break;
	default:
		/* do nothing */
		break;
	}

	dwc->link_state = next;
}
static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;

	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
		dwc3_suspend_gadget(dwc);

	dwc->link_state = next;
}

static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
		unsigned int evtinfo)
{
	unsigned int is_ss = evtinfo & BIT(4);

	/*
	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
	 * has a known issue which can cause USB CV TD.9.23 to fail
	 * randomly.
	 *
	 * Because of this issue, core could generate bogus hibernation
	 * events which SW needs to ignore.
	 *
	 * Refers to:
	 *
	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
	 * Device Fallback from SuperSpeed
	 */
	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
		return;

	/* enter hibernation here */
}
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_devt *event)
{
	switch (event->type) {
	case DWC3_DEVICE_EVENT_DISCONNECT:
		dwc3_gadget_disconnect_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_RESET:
		dwc3_gadget_reset_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_CONNECT_DONE:
		dwc3_gadget_conndone_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_WAKEUP:
		dwc3_gadget_wakeup_interrupt(dwc);
		break;
	case DWC3_DEVICE_EVENT_HIBER_REQ:
		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
					"unexpected hibernation event\n"))
			break;

		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
		break;
	case DWC3_DEVICE_EVENT_EOPF:
		/* It changed to be suspend event for version 2.30a and above */
		if (dwc->revision >= DWC3_REVISION_230A) {
			/*
			 * Ignore suspend event until the gadget enters into
			 * USB_STATE_CONFIGURED state.
			 */
			if (dwc->gadget.state >= USB_STATE_CONFIGURED)
				dwc3_gadget_suspend_interrupt(dwc,
						event->event_info);
		}
		break;
	case DWC3_DEVICE_EVENT_SOF:
	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
	case DWC3_DEVICE_EVENT_CMD_CMPL:
	case DWC3_DEVICE_EVENT_OVERFLOW:
		break;
	default:
		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
	}
}

static void dwc3_process_event_entry(struct dwc3 *dwc,
		const union dwc3_event *event)
{
	trace_dwc3_event(event->raw, dwc);

	if (!event->type.is_devspec)
		dwc3_endpoint_interrupt(dwc, &event->depevt);
	else if (event->type.type == DWC3_EVENT_TYPE_DEV)
		dwc3_gadget_interrupt(dwc, &event->devt);
	else
		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
}
static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	irqreturn_t ret = IRQ_NONE;
	int left;
	u32 reg;

	left = evt->count;

	if (!(evt->flags & DWC3_EVENT_PENDING))
		return IRQ_NONE;

	while (left > 0) {
		union dwc3_event event;

		event.raw = *(u32 *) (evt->cache + evt->lpos);

		dwc3_process_event_entry(dwc, &event);

		/*
		 * FIXME we wrap around correctly to the next entry as
		 * almost all entries are 4 bytes in size. There is one
		 * entry which has 12 bytes which is a regular entry
		 * followed by 8 bytes data. ATM I don't know how
		 * things are organized if we get next to the buffer
		 * boundary, so I worry about that once we try to handle
		 * that case.
		 */
		evt->lpos = (evt->lpos + 4) % evt->length;
		left -= 4;
	}

	evt->count = 0;
	evt->flags &= ~DWC3_EVENT_PENDING;
	ret = IRQ_HANDLED;

	/* Unmask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg &= ~DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	if (dwc->imod_interval) {
		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
	}

	return ret;
}
static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer *evt = _evt;
	struct dwc3 *dwc = evt->dwc;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_process_event_buf(evt);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
	struct dwc3 *dwc = evt->dwc;
	u32 amount;
	u32 count;
	u32 reg;

	if (pm_runtime_suspended(dwc->dev)) {
		pm_runtime_get(dwc->dev);
		disable_irq_nosync(dwc->irq_gadget);
		dwc->pending_events = true;
		return IRQ_HANDLED;
	}

	/*
	 * With PCIe legacy interrupt, test shows that top-half irq handler can
	 * be called again after HW interrupt deassertion. Check if bottom-half
	 * irq event handler completes before caching new event to prevent
	 * losing events.
	 */
	if (evt->flags & DWC3_EVENT_PENDING)
		return IRQ_HANDLED;

	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return IRQ_NONE;

	evt->count = count;
	evt->flags |= DWC3_EVENT_PENDING;

	/* Mask interrupt */
	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
	reg |= DWC3_GEVNTSIZ_INTMASK;
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);

	amount = min(count, evt->length - evt->lpos);
	memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);

	if (amount < count)
		memcpy(evt->cache, evt->buf, count - amount);

	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
	struct dwc3_event_buffer	*evt = _evt;

	return dwc3_check_event_buf(evt);
}
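
/*
 * Interrupt handling is split in two stages. The hard IRQ path
 * (dwc3_interrupt() -> dwc3_check_event_buf()) copies pending events into
 * evt->cache, masks the event interrupt via GEVNTSIZ and returns
 * IRQ_WAKE_THREAD; the threaded handler (dwc3_thread_interrupt() ->
 * dwc3_process_event_buf()) then walks the cached events under dwc->lock
 * and unmasks the interrupt when it is done.
 */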
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
	int irq;

	irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
	if (irq > 0)
		goto out;

	if (irq == -EPROBE_DEFER)
		goto out;

	irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
	if (irq > 0)
		goto out;

	if (irq == -EPROBE_DEFER)
		goto out;

	irq = platform_get_irq(dwc3_pdev, 0);
	if (irq > 0)
		goto out;

	if (!irq)
		irq = -EINVAL;

out:
	return irq;
}
/**
 * dwc3_gadget_init - initializes gadget related registers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_gadget_init(struct dwc3 *dwc)
{
	int ret;
	int irq;

	irq = dwc3_gadget_get_irq(dwc);
	if (irq < 0) {
		ret = irq;
		goto err0;
	}

	dwc->irq_gadget = irq;

	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
					  sizeof(*dwc->ep0_trb) * 2,
					  &dwc->ep0_trb_addr, GFP_KERNEL);
	if (!dwc->ep0_trb) {
		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
		ret = -ENOMEM;
		goto err0;
	}

	dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!dwc->setup_buf) {
		ret = -ENOMEM;
		goto err1;
	}

	dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
			&dwc->bounce_addr, GFP_KERNEL);
	if (!dwc->bounce) {
		ret = -ENOMEM;
		goto err2;
	}

	init_completion(&dwc->ep0_in_setup);

	dwc->gadget.ops			= &dwc3_gadget_ops;
	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
	dwc->gadget.sg_supported	= true;
	dwc->gadget.name		= "dwc3-gadget";
	dwc->gadget.lpm_capable		= true;

	/*
	 * FIXME We might be setting max_speed to <SUPER, however versions
	 * <2.20a of dwc3 have an issue with metastability (documented
	 * elsewhere in this driver) which tells us we can't set max speed to
	 * anything lower than SUPER.
	 *
	 * Because gadget.max_speed is only used by composite.c and function
	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
	 * to happen so we avoid sending SuperSpeed Capability descriptor
	 * together with our BOS descriptor as that could confuse host into
	 * thinking we can handle super speed.
	 *
	 * Note that, in fact, we won't even support GetBOS requests when speed
	 * is less than super speed because we don't have means, yet, to tell
	 * composite.c that we are USB 2.0 + LPM ECN.
	 */
	if (dwc->revision < DWC3_REVISION_220A &&
	    !dwc->dis_metastability_quirk)
		dev_info(dwc->dev, "changing max_speed on rev %08x\n",
				dwc->revision);

	dwc->gadget.max_speed		= dwc->maximum_speed;

	/*
	 * REVISIT: Here we should clear all pending IRQs to be
	 * sure we're starting from a well known location.
	 */

	ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
	if (ret)
		goto err3;

	ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
	if (ret) {
		dev_err(dwc->dev, "failed to register udc\n");
		goto err4;
	}

	dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);

	return 0;

err4:
	dwc3_gadget_free_endpoints(dwc);

err3:
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			dwc->bounce_addr);

err2:
	kfree(dwc->setup_buf);

err1:
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			dwc->ep0_trb, dwc->ep0_trb_addr);

err0:
	return ret;
}
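
/*
 * The error unwinding above releases resources in the reverse order of
 * allocation (endpoints, bounce buffer, setup buffer, ep0 TRBs), matching
 * the teardown performed by dwc3_gadget_exit() below.
 */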
/* -------------------------------------------------------------------------- */
void dwc3_gadget_exit(struct dwc3 *dwc)
{
	usb_del_gadget_udc(&dwc->gadget);
	dwc3_gadget_free_endpoints(dwc);
	dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
			  dwc->bounce_addr);
	kfree(dwc->setup_buf);
	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
			  dwc->ep0_trb, dwc->ep0_trb_addr);
}

int dwc3_gadget_suspend(struct dwc3 *dwc)
{
	if (!dwc->gadget_driver)
		return 0;

	dwc3_gadget_run_stop(dwc, false, false);
	dwc3_disconnect_gadget(dwc);
	__dwc3_gadget_stop(dwc);

	return 0;
}

int dwc3_gadget_resume(struct dwc3 *dwc)
{
	int			ret;

	if (!dwc->gadget_driver)
		return 0;

	ret = __dwc3_gadget_start(dwc);
	if (ret < 0)
		goto err0;

	ret = dwc3_gadget_run_stop(dwc, true, false);
	if (ret < 0)
		goto err1;

	return 0;

err1:
	__dwc3_gadget_stop(dwc);

err0:
	return ret;
}

void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
	if (dwc->pending_events) {
		dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
		dwc->pending_events = false;
		enable_irq(dwc->irq_gadget);
	}
}