// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *
 * Based on dummy_hcd.c, which is:
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 Alan Stern
 */

#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>

#include "vudc.h"
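
/*
 * bmRequestType values matched below in handle_control_request():
 * standard requests addressed to the device, an interface or an
 * endpoint, plus their IN (device-to-host) variants.
 */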
#define DEV_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define DEV_INREQUEST	(DEV_REQUEST | USB_DIR_IN)
#define INTF_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define INTF_INREQUEST	(INTF_REQUEST | USB_DIR_IN)
#define EP_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define EP_INREQUEST	(EP_REQUEST | USB_DIR_IN)
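
/*
 * Rough per-frame (1 ms) byte budget of the emulated bus at a given
 * speed; v_timer() charges bulk transfers against it. Returns -1 if
 * the gadget speed is not known yet.
 */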
static int get_frame_limit(enum usb_device_speed speed)
{
	switch (speed) {
	case USB_SPEED_LOW:
		return 8 /*bytes*/ * 12 /*packets*/;
	case USB_SPEED_FULL:
		return 64 /*bytes*/ * 19 /*packets*/;
	case USB_SPEED_HIGH:
		return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
	case USB_SPEED_SUPER:
		/* Bus speed is 500000 bytes/ms, so use a little less */
		return 490000;
	default:
		/* error */
		return -1;
	}
}

/**
 * handle_control_request() - handles all control transfers
 * @udc: pointer to vudc
 * @urb: the urb request to handle
 * @setup: pointer to the setup data for a USB device control
 *	 request
 * @status: pointer to request handling status
 *
 * Return 0 - if the request was handled
 *	  1 - if the request wasn't handled
 *	  error code on error
 *
 * Adapted from drivers/usb/gadget/udc/dummy_hcd.c
 */
static int handle_control_request(struct vudc *udc, struct urb *urb,
				  struct usb_ctrlrequest *setup,
				  int *status)
{
	struct vep	*ep2;
	int		ret_val = 1;
	unsigned int	w_index;
	unsigned int	w_value;

	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != DEV_REQUEST)
			break;
		udc->address = w_value;
		ret_val = 0;
		*status = 0;
		break;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				udc->gadget.b_hnp_enable = 1;
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				udc->gadget.a_hnp_support = 1;
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				udc->gadget.a_alt_hnp_support = 1;
				break;
			default:
				ret_val = -EOPNOTSUPP;
			}
			if (ret_val == 0) {
				udc->devstatus |= (1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			ep2->halted = 1;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				w_value = USB_DEVICE_REMOTE_WAKEUP;
				break;

			case USB_DEVICE_U1_ENABLE:
			case USB_DEVICE_U2_ENABLE:
			case USB_DEVICE_LTM_ENABLE:
				ret_val = -EOPNOTSUPP;
				break;

			default:
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (ret_val == 0) {
				udc->devstatus &= ~(1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (!ep2->wedged)
				ep2->halted = 0;
			ret_val = 0;
			*status = 0;
		}
		break;
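
	/*
	 * GET_STATUS is answered from the emulated state kept in the UDC:
	 * devstatus bits for the device, the halt flag for an endpoint.
	 */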
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == DEV_INREQUEST
		    || setup->bRequestType == INTF_INREQUEST
		    || setup->bRequestType == EP_INREQUEST) {
			char *buf;
			/*
			 * device: remote wakeup, self-powered
			 * interface: nothing
			 * endpoint: halt
			 */
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == EP_INREQUEST) {
					ep2 = vudc_find_endpoint(udc, w_index);
					if (!ep2) {
						ret_val = -EOPNOTSUPP;
						break;
					}
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					   DEV_INREQUEST) {
					buf[0] = (u8)udc->devstatus;
				} else {
					buf[0] = 0;
				}
			}
			if (urb->transfer_buffer_length > 1)
				buf[1] = 0;
			urb->actual_length = min_t(u32, 2,
				urb->transfer_buffer_length);
			ret_val = 0;
			*status = 0;
		}
		break;
	}
	return ret_val;
}
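
/*
 * Move data for one urb between the host-side transfer buffer and the
 * usb_requests queued on the endpoint; a short packet or a completely
 * filled buffer completes the request and/or the urb. Returns the
 * number of bytes copied so v_timer() can charge it against the frame
 * budget.
 */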
/* Adapted from dummy_hcd.c; caller must hold lock */
static int transfer(struct vudc *udc,
		struct urb *urb, struct vep *ep, int limit)
{
	struct vrequest	*req;
	int sent = 0;
top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->req_queue, req_entry) {
		unsigned int	host_len, dev_len, len;
		void		*ubuf_pos, *rbuf_pos;
		int		is_short, to_host;
		int		rescan = 0;

		/*
		 * 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);

		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* send multiple of maxpacket first, then remainder */
			if (len >= ep->ep.maxpacket) {
				is_short = 0;
				if (len % ep->ep.maxpacket > 0)
					rescan = 1;
				len -= len % ep->ep.maxpacket;
			} else {
				is_short = 1;
			}

			ubuf_pos = urb->transfer_buffer + urb->actual_length;
			rbuf_pos = req->req.buf + req->req.actual;

			if (urb->pipe & USB_DIR_IN)
				memcpy(ubuf_pos, rbuf_pos, len);
			else
				memcpy(rbuf_pos, ubuf_pos, len);

			urb->actual_length += len;
			req->req.actual += len;
			sent += len;
		}

		/*
		 * short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				urb->status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					urb->status = -EOVERFLOW;
				else
					urb->status = 0;
			} else {
				urb->status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}

		/* many requests terminate without a short packet */
		/* also check if we need to send zlp */
		} else {
			if (req->req.length == req->req.actual) {
				if (req->req.zero && to_host)
					rescan = 1;
				else
					req->req.status = 0;
			}
			if (urb->transfer_buffer_length == urb->actual_length) {
				if (urb->transfer_flags & URB_ZERO_PACKET &&
				    !to_host)
					rescan = 1;
				else
					urb->status = 0;
			}
		}

		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {

			list_del_init(&req->req_entry);
			spin_unlock(&udc->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&udc->lock);

			/* requests might have been unlinked... */
			rescan = 1;
		}

		/* host side completion --> terminate */
		if (urb->status != -EINPROGRESS)
			break;

		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}

	return sent;
}
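
/*
 * Runs once per emulated millisecond frame: walks the urb queue,
 * handles ep0 setup packets, moves data with transfer() within the
 * frame's byte budget and hands finished urbs back to the tx thread.
 */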
static void v_timer(struct timer_list *t)
{
	struct vudc *udc = from_timer(udc, t, tr_timer.timer);
	struct transfer_timer *timer = &udc->tr_timer;
	struct urbp *urb_p, *tmp;
	struct usb_ep *_ep;
	struct vep *ep;
	unsigned long flags;
	int ret = 0;
	int total, limit;

	spin_lock_irqsave(&udc->lock, flags);

	total = get_frame_limit(udc->gadget.speed);
	if (total < 0) {	/* unknown speed, or not set yet */
		timer->state = VUDC_TR_IDLE;
		spin_unlock_irqrestore(&udc->lock, flags);
		return;
	}
	/* is it next frame now? */
	if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
		timer->frame_limit = total;
		/* FIXME: how to make it accurate? */
		timer->frame_start = jiffies;
	} else {
		total = timer->frame_limit;
	}
	/* We have to clear ep0 flags separately as it's not on the list */
	udc->ep[0].already_seen = 0;
	list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
		ep = to_vep(_ep);
		ep->already_seen = 0;
	}

restart:
	list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
		struct urb *urb = urb_p->urb;

		ep = urb_p->ep;
		if (urb->unlinked)
			goto return_urb;
		if (timer->state != VUDC_TR_RUNNING)
			continue;

		if (!ep) {
			urb->status = -EPROTO;
			goto return_urb;
		}

		/* Used up bandwidth? */
		if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
			continue;

		if (ep->already_seen)
			continue;
		ep->already_seen = 1;
		if (ep == &udc->ep[0] && urb_p->new) {
			ep->setup_stage = 1;
			urb_p->new = 0;
		}
		if (ep->halted && !ep->setup_stage) {
			urb->status = -EPIPE;
			goto return_urb;
		}

		if (ep == &udc->ep[0] && ep->setup_stage) {
			/* TODO - flush any stale requests */
			ep->setup_stage = 0;
			ep->halted = 0;

			ret = handle_control_request(udc, urb,
				(struct usb_ctrlrequest *) urb->setup_packet,
				(&urb->status));
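
			/*
			 * Requests not handled above (ret > 0) go to the
			 * gadget driver; udc->lock is dropped so its setup()
			 * callback can queue requests back on this UDC.
			 */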
			if (ret > 0) {
				spin_unlock(&udc->lock);
				ret = udc->driver->setup(&udc->gadget,
					(struct usb_ctrlrequest *)
					urb->setup_packet);
				spin_lock(&udc->lock);
			}
			if (ret >= 0) {
				/* no delays (max 64kb data stage) */
				limit = 64 * 1024;
				goto treat_control_like_bulk;
			} else {
				urb->status = -EPIPE;
				urb->actual_length = 0;
				goto return_urb;
			}
		}

		limit = total;
		switch (ep->type) {
		case USB_ENDPOINT_XFER_ISOC:
			/* TODO: support */
			urb->status = -EXDEV;
			break;

		case USB_ENDPOINT_XFER_INT:
			/*
			 * TODO: figure out bandwidth guarantees
			 * for now, give unlimited bandwidth
			 */
			limit += urb->transfer_buffer_length;
			fallthrough;
		default:
treat_control_like_bulk:
			total -= transfer(udc, urb, ep, limit);
		}
		if (urb->status == -EINPROGRESS)
			continue;

return_urb:
		if (ep)
			ep->already_seen = ep->setup_stage = 0;
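
		/*
		 * Hand the finished urb to the tx thread under lock_tx; it
		 * sends a RET_SUBMIT (or RET_UNLINK for unlinked urbs) back
		 * to the client side.
		 */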
		spin_lock(&udc->lock_tx);
		list_del(&urb_p->urb_entry);
		if (!urb->unlinked) {
			v_enqueue_ret_submit(udc, urb_p);
		} else {
			v_enqueue_ret_unlink(udc, urb_p->seqnum,
					     urb->unlinked);
			free_urbp_and_urb(urb_p);
		}
		wake_up(&udc->tx_waitq);
		spin_unlock(&udc->lock_tx);

		goto restart;
	}

	/* TODO - also wait on empty usb_request queues? */
	if (list_empty(&udc->urb_queue))
		timer->state = VUDC_TR_IDLE;
	else
		mod_timer(&timer->timer,
			  timer->frame_start + msecs_to_jiffies(1));

	spin_unlock_irqrestore(&udc->lock, flags);
}

/* All timer functions are run with udc->lock held */

void v_init_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	timer_setup(&t->timer, v_timer, 0);
	t->state = VUDC_TR_STOPPED;
}

void v_start_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer start");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		return v_kick_timer(udc, jiffies);
	case VUDC_TR_STOPPED:
		t->state = VUDC_TR_IDLE;
		t->frame_start = jiffies;
		t->frame_limit = get_frame_limit(udc->gadget.speed);
		return v_kick_timer(udc, jiffies);
	}
}
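
/*
 * Arm the frame timer so v_timer() runs at the given time; from IDLE
 * this also marks the timer RUNNING, while an already RUNNING timer is
 * left alone.
 */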
void v_kick_timer(struct vudc *udc, unsigned long time)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer kick");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		t->state = VUDC_TR_RUNNING;
		fallthrough;
	case VUDC_TR_STOPPED:
		/* we may want to kick timer to unqueue urbs */
		mod_timer(&t->timer, time);
	}
}

void v_stop_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	/* timer itself will take care of stopping */
	dev_dbg(&udc->pdev->dev, "timer stop");
	t->state = VUDC_TR_STOPPED;
}