// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_gadget.c - MediaTek usb3 DRD peripheral support
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */
11 #include "mtu3_trace.h"
13 void mtu3_req_complete(struct mtu3_ep
*mep
,
14 struct usb_request
*req
, int status
)
15 __releases(mep
->mtu
->lock
)
16 __acquires(mep
->mtu
->lock
)
18 struct mtu3_request
*mreq
;
22 mreq
= to_mtu3_request(req
);
23 list_del(&mreq
->list
);
24 if (mreq
->request
.status
== -EINPROGRESS
)
25 mreq
->request
.status
= status
;
30 trace_mtu3_req_complete(mreq
);
31 spin_unlock(&mtu
->lock
);
33 /* ep0 makes use of PIO, needn't unmap it */
35 usb_gadget_unmap_request(&mtu
->g
, req
, mep
->is_in
);
37 dev_dbg(mtu
->dev
, "%s complete req: %p, sts %d, %d/%d\n", mep
->name
,
38 req
, req
->status
, mreq
->request
.actual
, mreq
->request
.length
);
40 usb_gadget_giveback_request(&mep
->ep
, &mreq
->request
);
42 spin_lock(&mtu
->lock
);
46 static void nuke(struct mtu3_ep
*mep
, const int status
)
48 struct mtu3_request
*mreq
= NULL
;
51 if (list_empty(&mep
->req_list
))
54 dev_dbg(mep
->mtu
->dev
, "abort %s's req: sts %d\n", mep
->name
, status
);
60 while (!list_empty(&mep
->req_list
)) {
61 mreq
= list_first_entry(&mep
->req_list
,
62 struct mtu3_request
, list
);
63 mtu3_req_complete(mep
, &mreq
->request
, status
);
67 static int mtu3_ep_enable(struct mtu3_ep
*mep
)
69 const struct usb_endpoint_descriptor
*desc
;
70 const struct usb_ss_ep_comp_descriptor
*comp_desc
;
71 struct mtu3
*mtu
= mep
->mtu
;
79 comp_desc
= mep
->comp_desc
;
80 mep
->type
= usb_endpoint_type(desc
);
81 max_packet
= usb_endpoint_maxp(desc
);
82 mep
->maxp
= max_packet
& GENMASK(10, 0);
84 switch (mtu
->g
.speed
) {
86 case USB_SPEED_SUPER_PLUS
:
87 if (usb_endpoint_xfer_int(desc
) ||
88 usb_endpoint_xfer_isoc(desc
)) {
89 interval
= desc
->bInterval
;
90 interval
= clamp_val(interval
, 1, 16) - 1;
91 if (usb_endpoint_xfer_isoc(desc
) && comp_desc
)
92 mult
= comp_desc
->bmAttributes
;
95 burst
= comp_desc
->bMaxBurst
;
99 if (usb_endpoint_xfer_isoc(desc
) ||
100 usb_endpoint_xfer_int(desc
)) {
101 interval
= desc
->bInterval
;
102 interval
= clamp_val(interval
, 1, 16) - 1;
103 burst
= (max_packet
& GENMASK(12, 11)) >> 11;
107 break; /*others are ignored */
110 dev_dbg(mtu
->dev
, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
111 __func__
, mep
->maxp
, interval
, burst
, mult
);
113 mep
->ep
.maxpacket
= mep
->maxp
;
115 mep
->ep
.comp_desc
= comp_desc
;
117 /* slot mainly affects bulk/isoc transfer, so ignore int */
118 mep
->slot
= usb_endpoint_xfer_int(desc
) ? 0 : mtu
->slot
;
120 ret
= mtu3_config_ep(mtu
, mep
, interval
, burst
, mult
);
124 ret
= mtu3_gpd_ring_alloc(mep
);
126 mtu3_deconfig_ep(mtu
, mep
);
135 static int mtu3_ep_disable(struct mtu3_ep
*mep
)
137 struct mtu3
*mtu
= mep
->mtu
;
141 /* abort all pending requests */
142 nuke(mep
, -ESHUTDOWN
);
143 mtu3_deconfig_ep(mtu
, mep
);
144 mtu3_gpd_ring_free(mep
);
148 mep
->comp_desc
= NULL
;
155 static int mtu3_gadget_ep_enable(struct usb_ep
*ep
,
156 const struct usb_endpoint_descriptor
*desc
)
163 if (!ep
|| !desc
|| desc
->bDescriptorType
!= USB_DT_ENDPOINT
) {
164 pr_debug("%s invalid parameters\n", __func__
);
168 if (!desc
->wMaxPacketSize
) {
169 pr_debug("%s missing wMaxPacketSize\n", __func__
);
172 mep
= to_mtu3_ep(ep
);
175 /* check ep number and direction against endpoint */
176 if (usb_endpoint_num(desc
) != mep
->epnum
)
179 if (!!usb_endpoint_dir_in(desc
) ^ !!mep
->is_in
)
182 dev_dbg(mtu
->dev
, "%s %s\n", __func__
, ep
->name
);
184 if (mep
->flags
& MTU3_EP_ENABLED
) {
185 dev_WARN_ONCE(mtu
->dev
, true, "%s is already enabled\n",
190 spin_lock_irqsave(&mtu
->lock
, flags
);
192 mep
->comp_desc
= ep
->comp_desc
;
194 ret
= mtu3_ep_enable(mep
);
200 mep
->flags
|= MTU3_EP_ENABLED
;
204 spin_unlock_irqrestore(&mtu
->lock
, flags
);
206 dev_dbg(mtu
->dev
, "%s active_ep=%d\n", __func__
, mtu
->active_ep
);
207 trace_mtu3_gadget_ep_enable(mep
);
212 static int mtu3_gadget_ep_disable(struct usb_ep
*ep
)
214 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
215 struct mtu3
*mtu
= mep
->mtu
;
218 dev_dbg(mtu
->dev
, "%s %s\n", __func__
, mep
->name
);
219 trace_mtu3_gadget_ep_disable(mep
);
221 if (!(mep
->flags
& MTU3_EP_ENABLED
)) {
222 dev_warn(mtu
->dev
, "%s is already disabled\n", mep
->name
);
226 spin_lock_irqsave(&mtu
->lock
, flags
);
227 mtu3_ep_disable(mep
);
228 mep
->flags
&= ~MTU3_EP_ENABLED
;
230 spin_unlock_irqrestore(&(mtu
->lock
), flags
);
232 dev_dbg(mtu
->dev
, "%s active_ep=%d, mtu3 is_active=%d\n",
233 __func__
, mtu
->active_ep
, mtu
->is_active
);
238 struct usb_request
*mtu3_alloc_request(struct usb_ep
*ep
, gfp_t gfp_flags
)
240 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
241 struct mtu3_request
*mreq
;
243 mreq
= kzalloc(sizeof(*mreq
), gfp_flags
);
247 mreq
->request
.dma
= DMA_ADDR_INVALID
;
248 mreq
->epnum
= mep
->epnum
;
250 trace_mtu3_alloc_request(mreq
);
252 return &mreq
->request
;
/*
 * usb_ep_ops.free_request callback: release a request previously returned
 * by mtu3_alloc_request().
 */
void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct mtu3_request *mreq = to_mtu3_request(req);

	trace_mtu3_free_request(mreq);
	kfree(mreq);
}
263 static int mtu3_gadget_queue(struct usb_ep
*ep
,
264 struct usb_request
*req
, gfp_t gfp_flags
)
267 struct mtu3_request
*mreq
;
278 mep
= to_mtu3_ep(ep
);
280 mreq
= to_mtu3_request(req
);
283 if (mreq
->mep
!= mep
)
286 dev_dbg(mtu
->dev
, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
287 __func__
, mep
->is_in
? "TX" : "RX", mreq
->epnum
, ep
->name
,
288 mreq
, ep
->maxpacket
, mreq
->request
.length
);
290 if (req
->length
> GPD_BUF_SIZE
||
291 (mtu
->gen2cp
&& req
->length
> GPD_BUF_SIZE_EL
)) {
293 "req length > supported MAX:%d requested:%d\n",
294 mtu
->gen2cp
? GPD_BUF_SIZE_EL
: GPD_BUF_SIZE
,
299 /* don't queue if the ep is down */
301 dev_dbg(mtu
->dev
, "req=%p queued to %s while it's disabled\n",
306 mreq
->request
.actual
= 0;
307 mreq
->request
.status
= -EINPROGRESS
;
309 ret
= usb_gadget_map_request(&mtu
->g
, req
, mep
->is_in
);
311 dev_err(mtu
->dev
, "dma mapping failed\n");
315 spin_lock_irqsave(&mtu
->lock
, flags
);
317 if (mtu3_prepare_transfer(mep
)) {
322 list_add_tail(&mreq
->list
, &mep
->req_list
);
323 mtu3_insert_gpd(mep
, mreq
);
324 mtu3_qmu_resume(mep
);
327 spin_unlock_irqrestore(&mtu
->lock
, flags
);
328 trace_mtu3_gadget_queue(mreq
);
333 static int mtu3_gadget_dequeue(struct usb_ep
*ep
, struct usb_request
*req
)
335 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
336 struct mtu3_request
*mreq
= to_mtu3_request(req
);
337 struct mtu3_request
*r
;
340 struct mtu3
*mtu
= mep
->mtu
;
342 if (!ep
|| !req
|| mreq
->mep
!= mep
)
345 dev_dbg(mtu
->dev
, "%s : req=%p\n", __func__
, req
);
346 trace_mtu3_gadget_dequeue(mreq
);
348 spin_lock_irqsave(&mtu
->lock
, flags
);
350 list_for_each_entry(r
, &mep
->req_list
, list
) {
355 dev_dbg(mtu
->dev
, "req=%p not queued to %s\n", req
, ep
->name
);
360 mtu3_qmu_flush(mep
); /* REVISIT: set BPS ?? */
361 mtu3_req_complete(mep
, req
, -ECONNRESET
);
365 spin_unlock_irqrestore(&mtu
->lock
, flags
);
371 * Set or clear the halt bit of an EP.
372 * A halted EP won't TX/RX any data but will queue requests.
374 static int mtu3_gadget_ep_set_halt(struct usb_ep
*ep
, int value
)
376 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
377 struct mtu3
*mtu
= mep
->mtu
;
378 struct mtu3_request
*mreq
;
385 dev_dbg(mtu
->dev
, "%s : %s...", __func__
, ep
->name
);
387 spin_lock_irqsave(&mtu
->lock
, flags
);
389 if (mep
->type
== USB_ENDPOINT_XFER_ISOC
) {
394 mreq
= next_request(mep
);
397 * If there is not request for TX-EP, QMU will not transfer
398 * data to TX-FIFO, so no need check whether TX-FIFO
399 * holds bytes or not here
402 dev_dbg(mtu
->dev
, "req in progress, cannot halt %s\n",
411 dev_dbg(mtu
->dev
, "%s %s stall\n", ep
->name
, value
? "set" : "clear");
413 mtu3_ep_stall_set(mep
, value
);
416 spin_unlock_irqrestore(&mtu
->lock
, flags
);
417 trace_mtu3_gadget_ep_set_halt(mep
);
422 /* Sets the halt feature with the clear requests ignored */
423 static int mtu3_gadget_ep_set_wedge(struct usb_ep
*ep
)
425 struct mtu3_ep
*mep
= to_mtu3_ep(ep
);
432 return usb_ep_set_halt(ep
);
435 static const struct usb_ep_ops mtu3_ep_ops
= {
436 .enable
= mtu3_gadget_ep_enable
,
437 .disable
= mtu3_gadget_ep_disable
,
438 .alloc_request
= mtu3_alloc_request
,
439 .free_request
= mtu3_free_request
,
440 .queue
= mtu3_gadget_queue
,
441 .dequeue
= mtu3_gadget_dequeue
,
442 .set_halt
= mtu3_gadget_ep_set_halt
,
443 .set_wedge
= mtu3_gadget_ep_set_wedge
,
446 static int mtu3_gadget_get_frame(struct usb_gadget
*gadget
)
448 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
450 return (int)mtu3_readl(mtu
->mac_base
, U3D_USB20_FRAME_NUM
);
453 static int mtu3_gadget_wakeup(struct usb_gadget
*gadget
)
455 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
458 dev_dbg(mtu
->dev
, "%s\n", __func__
);
460 /* remote wakeup feature is not enabled by host */
461 if (!mtu
->may_wakeup
)
464 spin_lock_irqsave(&mtu
->lock
, flags
);
465 if (mtu
->g
.speed
>= USB_SPEED_SUPER
) {
466 mtu3_setbits(mtu
->mac_base
, U3D_LINK_POWER_CONTROL
, UX_EXIT
);
468 mtu3_setbits(mtu
->mac_base
, U3D_POWER_MANAGEMENT
, RESUME
);
469 spin_unlock_irqrestore(&mtu
->lock
, flags
);
470 usleep_range(10000, 11000);
471 spin_lock_irqsave(&mtu
->lock
, flags
);
472 mtu3_clrbits(mtu
->mac_base
, U3D_POWER_MANAGEMENT
, RESUME
);
474 spin_unlock_irqrestore(&mtu
->lock
, flags
);
478 static int mtu3_gadget_set_self_powered(struct usb_gadget
*gadget
,
481 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
483 mtu
->is_self_powered
= !!is_selfpowered
;
487 static int mtu3_gadget_pullup(struct usb_gadget
*gadget
, int is_on
)
489 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
492 dev_dbg(mtu
->dev
, "%s (%s) for %sactive device\n", __func__
,
493 is_on
? "on" : "off", mtu
->is_active
? "" : "in");
495 /* we'd rather not pullup unless the device is active. */
496 spin_lock_irqsave(&mtu
->lock
, flags
);
499 if (!mtu
->is_active
) {
500 /* save it for mtu3_start() to process the request */
501 mtu
->softconnect
= is_on
;
502 } else if (is_on
!= mtu
->softconnect
) {
503 mtu
->softconnect
= is_on
;
504 mtu3_dev_on_off(mtu
, is_on
);
507 spin_unlock_irqrestore(&mtu
->lock
, flags
);
512 static int mtu3_gadget_start(struct usb_gadget
*gadget
,
513 struct usb_gadget_driver
*driver
)
515 struct mtu3
*mtu
= gadget_to_mtu3(gadget
);
518 if (mtu
->gadget_driver
) {
519 dev_err(mtu
->dev
, "%s is already bound to %s\n",
520 mtu
->g
.name
, mtu
->gadget_driver
->driver
.name
);
524 dev_dbg(mtu
->dev
, "bind driver %s\n", driver
->function
);
526 spin_lock_irqsave(&mtu
->lock
, flags
);
528 mtu
->softconnect
= 0;
529 mtu
->gadget_driver
= driver
;
531 if (mtu
->ssusb
->dr_mode
== USB_DR_MODE_PERIPHERAL
)
534 spin_unlock_irqrestore(&mtu
->lock
, flags
);
539 static void stop_activity(struct mtu3
*mtu
)
541 struct usb_gadget_driver
*driver
= mtu
->gadget_driver
;
544 /* don't disconnect if it's not connected */
545 if (mtu
->g
.speed
== USB_SPEED_UNKNOWN
)
548 mtu
->g
.speed
= USB_SPEED_UNKNOWN
;
550 /* deactivate the hardware */
551 if (mtu
->softconnect
) {
552 mtu
->softconnect
= 0;
553 mtu3_dev_on_off(mtu
, 0);
557 * killing any outstanding requests will quiesce the driver;
558 * then report disconnect
560 nuke(mtu
->ep0
, -ESHUTDOWN
);
561 for (i
= 1; i
< mtu
->num_eps
; i
++) {
562 nuke(mtu
->in_eps
+ i
, -ESHUTDOWN
);
563 nuke(mtu
->out_eps
+ i
, -ESHUTDOWN
);
567 spin_unlock(&mtu
->lock
);
568 driver
->disconnect(&mtu
->g
);
569 spin_lock(&mtu
->lock
);
573 static int mtu3_gadget_stop(struct usb_gadget
*g
)
575 struct mtu3
*mtu
= gadget_to_mtu3(g
);
578 dev_dbg(mtu
->dev
, "%s\n", __func__
);
580 spin_lock_irqsave(&mtu
->lock
, flags
);
583 mtu
->gadget_driver
= NULL
;
585 if (mtu
->ssusb
->dr_mode
== USB_DR_MODE_PERIPHERAL
)
588 spin_unlock_irqrestore(&mtu
->lock
, flags
);
593 static const struct usb_gadget_ops mtu3_gadget_ops
= {
594 .get_frame
= mtu3_gadget_get_frame
,
595 .wakeup
= mtu3_gadget_wakeup
,
596 .set_selfpowered
= mtu3_gadget_set_self_powered
,
597 .pullup
= mtu3_gadget_pullup
,
598 .udc_start
= mtu3_gadget_start
,
599 .udc_stop
= mtu3_gadget_stop
,
602 static void mtu3_state_reset(struct mtu3
*mtu
)
605 mtu
->ep0_state
= MU3D_EP0_STATE_SETUP
;
609 mtu
->delayed_status
= false;
610 mtu
->test_mode
= false;
613 static void init_hw_ep(struct mtu3
*mtu
, struct mtu3_ep
*mep
,
614 u32 epnum
, u32 is_in
)
620 INIT_LIST_HEAD(&mep
->req_list
);
622 sprintf(mep
->name
, "ep%d%s", epnum
,
623 !epnum
? "" : (is_in
? "in" : "out"));
625 mep
->ep
.name
= mep
->name
;
626 INIT_LIST_HEAD(&mep
->ep
.ep_list
);
628 /* initialize maxpacket as SS */
630 usb_ep_set_maxpacket_limit(&mep
->ep
, 512);
631 mep
->ep
.caps
.type_control
= true;
632 mep
->ep
.ops
= &mtu3_ep0_ops
;
633 mtu
->g
.ep0
= &mep
->ep
;
635 usb_ep_set_maxpacket_limit(&mep
->ep
, 1024);
636 mep
->ep
.caps
.type_iso
= true;
637 mep
->ep
.caps
.type_bulk
= true;
638 mep
->ep
.caps
.type_int
= true;
639 mep
->ep
.ops
= &mtu3_ep_ops
;
640 list_add_tail(&mep
->ep
.ep_list
, &mtu
->g
.ep_list
);
643 dev_dbg(mtu
->dev
, "%s, name=%s, maxp=%d\n", __func__
, mep
->ep
.name
,
647 mep
->ep
.caps
.dir_in
= true;
648 mep
->ep
.caps
.dir_out
= true;
650 mep
->ep
.caps
.dir_in
= true;
652 mep
->ep
.caps
.dir_out
= true;
656 static void mtu3_gadget_init_eps(struct mtu3
*mtu
)
660 /* initialize endpoint list just once */
661 INIT_LIST_HEAD(&(mtu
->g
.ep_list
));
663 dev_dbg(mtu
->dev
, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
664 __func__
, mtu
->num_eps
);
666 init_hw_ep(mtu
, mtu
->ep0
, 0, 0);
667 for (epnum
= 1; epnum
< mtu
->num_eps
; epnum
++) {
668 init_hw_ep(mtu
, mtu
->in_eps
+ epnum
, epnum
, 1);
669 init_hw_ep(mtu
, mtu
->out_eps
+ epnum
, epnum
, 0);
673 int mtu3_gadget_setup(struct mtu3
*mtu
)
677 mtu
->g
.ops
= &mtu3_gadget_ops
;
678 mtu
->g
.max_speed
= mtu
->max_speed
;
679 mtu
->g
.speed
= USB_SPEED_UNKNOWN
;
680 mtu
->g
.sg_supported
= 0;
681 mtu
->g
.name
= MTU3_DRIVER_NAME
;
683 mtu
->delayed_status
= false;
685 mtu3_gadget_init_eps(mtu
);
687 ret
= usb_add_gadget_udc(mtu
->dev
, &mtu
->g
);
689 dev_err(mtu
->dev
, "failed to register udc\n");
694 void mtu3_gadget_cleanup(struct mtu3
*mtu
)
696 usb_del_gadget_udc(&mtu
->g
);
699 void mtu3_gadget_resume(struct mtu3
*mtu
)
701 dev_dbg(mtu
->dev
, "gadget RESUME\n");
702 if (mtu
->gadget_driver
&& mtu
->gadget_driver
->resume
) {
703 spin_unlock(&mtu
->lock
);
704 mtu
->gadget_driver
->resume(&mtu
->g
);
705 spin_lock(&mtu
->lock
);
709 /* called when SOF packets stop for 3+ msec or enters U3 */
710 void mtu3_gadget_suspend(struct mtu3
*mtu
)
712 dev_dbg(mtu
->dev
, "gadget SUSPEND\n");
713 if (mtu
->gadget_driver
&& mtu
->gadget_driver
->suspend
) {
714 spin_unlock(&mtu
->lock
);
715 mtu
->gadget_driver
->suspend(&mtu
->g
);
716 spin_lock(&mtu
->lock
);
720 /* called when VBUS drops below session threshold, and in other cases */
721 void mtu3_gadget_disconnect(struct mtu3
*mtu
)
723 dev_dbg(mtu
->dev
, "gadget DISCONNECT\n");
724 if (mtu
->gadget_driver
&& mtu
->gadget_driver
->disconnect
) {
725 spin_unlock(&mtu
->lock
);
726 mtu
->gadget_driver
->disconnect(&mtu
->g
);
727 spin_lock(&mtu
->lock
);
730 mtu3_state_reset(mtu
);
731 usb_gadget_set_state(&mtu
->g
, USB_STATE_NOTATTACHED
);
734 void mtu3_gadget_reset(struct mtu3
*mtu
)
736 dev_dbg(mtu
->dev
, "gadget RESET\n");
738 /* report disconnect, if we didn't flush EP state */
739 if (mtu
->g
.speed
!= USB_SPEED_UNKNOWN
)
740 mtu3_gadget_disconnect(mtu
);
742 mtu3_state_reset(mtu
);