/*
 * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 *
 * Based on drivers under drivers/usb/
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>

#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"
static const char * const ep0_state_string[] =  {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING",
};
/* Free the bdl during ep disable */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
				__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * check if the bd_table struct is allocated ?
		 * if yes, then check if bd memory has been allocated, then
		 * free the dma_pool and also the bd_table struct memory
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}
		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}

		dev_dbg(bdc->dev,
			"Free dma pool start_bd:%p dma:%llx\n",
			bd_table->start_bd,
			(unsigned long long)bd_table->dma);

		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array */
	kfree(ep->bd_list.bd_table_array);
}
/*
 * chain the tables, by inserting a chain bd at the end of prev_table,
 * pointing to next_table
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	/* Chain the prev table to next table */
	prev_table->start_bd[bd_p_tab-1].offset[0] =
			cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[1] =
			cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[2] = 0x0;

	prev_table->start_bd[bd_p_tab-1].offset[3] =
			cpu_to_le32(MARK_CHAIN_BD);
}
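/*
 * Resulting layout, e.g. with 3 tables: the last bd of each table is a
 * chain bd holding the dma address of the next table, and the caller chains
 * the last table back to table 0, closing the ring:
 *
 *	table0: [bd0 ... bd(n-2) | chain -> table1]
 *	table1: [bd0 ... bd(n-2) | chain -> table2]
 *	table2: [bd0 ... bd(n-2) | chain -> table0]
 */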
/* Allocate the bdl for ep, during config ep */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kzalloc(
					num_tabs * sizeof(struct bd_table *),
					GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
							GFP_ATOMIC,
							&bd_table->dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}

		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		memset(bd_table->start_bd, 0, bd_p_tab * sizeof(struct bdc_bd));
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}
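/*
 * Example of the indexing set up above: with num_tabs = 2 and bd_p_tab = 32
 * the bdl holds 64 bds, globally indexed 0..63 (max_bdi = 63), of which
 * bdi 31 and 63 are chain bds; eqp_bdi and hwd_bdi both start at 0, which
 * is the empty-ring condition.
 */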
/* returns how many bd's are needed for this transfer */
static inline int bd_needed_req(struct bdc_req *req)
{
	int bd_needed = 0;
	int remaining;

	/* 1 bd needed for 0 byte transfer */
	if (req->usb_req.length == 0)
		return 1;

	/* remaining bytes after transferring all max BD size BD's */
	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
	if (remaining)
		bd_needed++;

	/* How many maximum BUFF size BD's ? */
	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
	bd_needed += remaining;

	return bd_needed;
}
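/*
 * Example, assuming BD_MAX_BUFF_SIZE is 16K: a 40K request needs
 * 40K/16K = 2 full-size bds plus 1 bd for the 8K remainder, so 3 bds; a
 * 32K request needs exactly 2, and a 0-byte request still needs 1.
 */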
/* returns the bd index(bdi) corresponding to bd dma address */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find in which table this bd_dma_addr belongs: go through the table
	 * array and compare the addresses of the first and last bd of each
	 * table
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
				(unsigned long long)dma_first_bd,
				(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}
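/*
 * Example: if bd_dma_addr lands 5 bds into table 2 of a ring with 32 bds
 * per table, then bdi = 5, tbi = 2 and the returned global index is
 * 5 + 2 * 32 = 69.
 */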
/* returns the table index(tbi) of the given bdi */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int tbi;

	tbi = bdi / ep->bd_list.num_bds_table;
	dev_vdbg(ep->bdc->dev,
		"bdi:%d num_bds_table:%d tbi:%d\n",
		bdi, ep->bd_list.num_bds_table, tbi);

	return tbi;
}
/* Find the bdi of the last bd in the transfer */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}
/*
 * How many transfer bd's are available on this ep bdl, chain bds are not
 * counted in available bds
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where eqp and dqp pointers are, calculate number
	 * of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there is any chain bd in between */
			if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
					== bdi_to_tbi(ep, bd_list->eqp_bdi))) {
				available_bd = available1 - 1;
			} else {
				available_bd = available1;
			}
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if ring is full or empty so
	 * reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}
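/*
 * Worked example for the first branch above, with a 64-bd ring and
 * num_bds_table = 32: eqp_bdi = 40 and hwd_bdi = 10 give
 * available1 = 63 - 40 = 23 and available2 = 10; neither span covers a
 * full table so no chain bds are subtracted, and after reserving the one
 * bd that distinguishes full from empty, 23 + 10 - 1 = 32 bds are free.
 */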
/* Notify the hardware after queueing the bd to bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have any way to check if ep state is running,
	 * except the software flags.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}
/* returns the bd corresponding to bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi = 0;

	local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
	dev_vdbg(ep->bdc->dev,
		"%s bdi:%d local_bdi:%d\n",
		__func__, bdi, local_bdi);

	return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
}
/* Advance the enqueue pointer */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;
	/* if it's chain bd, then move to next */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}
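/*
 * Example with num_bds_table = 32 and max_bdi = 63: advancing from
 * eqp_bdi = 62 first gives 63, but (63 + 1) % 32 == 0 marks that as a
 * chain bd, so it is skipped to 64, which is max_bdi + 1 and therefore
 * wraps back to 0.
 */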
/* Setup the first bd for ep0 transfer */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}
/* Setup the bd dma descriptor for a given request */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	ep = req->ep;
	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs / maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
			/* format of first bd for ep0 is different than other */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}
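/*
 * Summary of the bd layout written above: offset[0]/offset[1] hold the
 * lower/upper 32 bits of the buffer dma address, offset[2] carries the
 * buffer length plus the interrupt target, and offset[3] carries the
 * control flags (BD_SOT, BD_SBF and the TFS count on the first bd,
 * BD_IOC/BD_EOT on the last).
 */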
/* Queue the xfr */
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	int num_bds, bd_available;
	struct bdc_ep *ep;
	int ret;

	ep = req->ep;
	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	num_bds = bd_needed_req(req);
	bd_available = bd_available_ep(ep);

	/* how many bd's are available on ep */
	if (num_bds > bd_available)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, num_bds);
	if (ret)
		return ret;
	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}
/* callback to gadget layer when xfr completes */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}
/* Disable the endpoint */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	ret = 0;
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop, it can fail in
	 * disconnect scenarios, continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free");

	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;

	return ret;
}
/* Enable the ep */
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc;
	int ret = 0;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
				__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return ret;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	ep->flags |= BDC_EP_ENABLED;
	if (ep->ep_num == 1)
		return ret;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}
/* EP0 related code */
/* Queue a status stage BD */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;
	bdc_queue_xfr(bdc, status_req);

	return 0;
}
/* Queue xfr on ep0 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* if status stage was delayed? */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
			return 0;
		}
	} else {
		/*
		 * if delayed status is false and a 0 length transfer is
		 * requested, i.e. for the status stage of some setup request,
		 * then just return from here; the status stage is queued
		 * independently
		 */
		if (req->usb_req.length == 0)
			return 0;
	}
	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep, &bdc->ep0_req);
}
/* Queue req on ep */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret = 0;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;

	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Dequeue a request from ep */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64 deq_ptr_64 = 0;
	struct bdc *bdc;
	u32 tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;
	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
				__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
				ep, (void *)ep->usb_ep.desc);
	/* Stop the ep to see where the HW is ? */
	ret = bdc_stop_ep(bdc, ep->ep_num);
	/* if there is an issue with stopping ep, then no need to go further */
	if (ret)
		return 0;

	/*
	 * After the endpoint is stopped, there can be 3 cases: the request
	 * is processed, pending or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
			end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If both start and end are already processed, we cannot deq req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;
	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma = table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to HW limitation we need to bypass chain bd's and issue ep_bla;
	 * in case start is pending and this is the first request in the list
	 * then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_hwd_bdi
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
/* Halt/Clear the ep based on value */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;
		dev_dbg(bdc->dev, "After Clear\n");
	}

	return ret;
}
/* Free all the ep */
void bdc_free_ep(struct bdc *bdc)
{
	struct bdc_ep *ep;
	u8 epnum;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (epnum = 1; epnum < bdc->num_eps; epnum++) {
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			continue;

		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);

		/* ep0 is not in this gadget list */
		if (epnum != 1)
			list_del(&ep->usb_ep.ep_list);

		kfree(ep);
	}
}
/* USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->test_mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		usb2_pm |= bdc->test_mode << 28;
		break;
	default:
		return -EINVAL;
	}
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}
/*
 * Helper function to handle Transfer status report with status as either
 * success or short
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
					struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);

	/* do not process this sr if ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);

	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * If sr_status is short and this transfer has more than 1 bd then it
	 * needs special handling; this is only applicable for bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is multi bd xfr, lets see which bd
		 * caused short transfer and how many bytes have been
		 * transferred so far.
		 */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi = bd_xfr->start_bdi;
		/*
		 * We know the start_bdi and short_bdi, how many xfr
		 * bds in between
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (!(bdi_to_tbi(ep, start_bdi) ==
						bdi_to_tbi(ep, short_bdi)))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		if (!(end_bdi == short_bdi))
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}
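/*
 * Example of the short-transfer math above: a 3-bd transfer that stops
 * short on its second bd has max_len_bds = 1, so the completed length is
 * 1 * BD_MAX_BUFF_SIZE plus the bytes queued on the short bd minus the
 * residue the controller reports in sreport->offset[2].
 */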
/* EP0 setup related packet handlers */

/*
 * Setup packet received, just store the packet and process on next DS or SS
 * started SR
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	/* Store received setup packet */
	setup_pkt = &bdc->setup_pkt;
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
	len = le16_to_cpu(setup_pkt->wLength);
	if (!len)
		bdc->ep0_state = WAIT_FOR_STATUS_START;
	else
		bdc->ep0_state = WAIT_FOR_DATA_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}
/* stall ep0 */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	struct bdc_req *req;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}
/* SET_ADD handlers */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	int ret = 0;
	u32 addr;

	addr = le16_to_cpu(ctrl->wValue);
	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	if (addr > 127)
		return -EINVAL;

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue Address device command */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		if (addr)
			usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);

		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			"SET Address in wrong device state %d\n",
			state);
		ret = -EINVAL;
	}

	return ret;
}
1150 static int ep0_handle_feature_dev(struct bdc
*bdc
, u16 wValue
,
1151 u16 wIndex
, bool set
)
1153 enum usb_device_state state
= bdc
->gadget
.state
;
1156 dev_dbg(bdc
->dev
, "%s set:%d dev state:%d\n",
1157 __func__
, set
, state
);
1159 case USB_DEVICE_REMOTE_WAKEUP
:
1160 dev_dbg(bdc
->dev
, "USB_DEVICE_REMOTE_WAKEUP\n");
1162 bdc
->devstatus
|= REMOTE_WAKE_ENABLE
;
1164 bdc
->devstatus
&= ~REMOTE_WAKE_ENABLE
;
1167 case USB_DEVICE_TEST_MODE
:
1168 dev_dbg(bdc
->dev
, "USB_DEVICE_TEST_MODE\n");
1169 if ((wIndex
& 0xFF) ||
1170 (bdc
->gadget
.speed
!= USB_SPEED_HIGH
) || !set
)
1173 bdc
->test_mode
= wIndex
>> 8;
1176 case USB_DEVICE_U1_ENABLE
:
1177 dev_dbg(bdc
->dev
, "USB_DEVICE_U1_ENABLE\n");
1179 if (bdc
->gadget
.speed
!= USB_SPEED_SUPER
||
1180 state
!= USB_STATE_CONFIGURED
)
1183 usppms
= bdc_readl(bdc
->regs
, BDC_USPPMS
);
1185 /* clear previous u1t */
1186 usppms
&= ~BDC_U1T(BDC_U1T_MASK
);
1187 usppms
|= BDC_U1T(U1_TIMEOUT
);
1188 usppms
|= BDC_U1E
| BDC_PORT_W1S
;
1189 bdc
->devstatus
|= (1 << USB_DEV_STAT_U1_ENABLED
);
1192 usppms
|= BDC_PORT_W1S
;
1193 bdc
->devstatus
&= ~(1 << USB_DEV_STAT_U1_ENABLED
);
1195 bdc_writel(bdc
->regs
, BDC_USPPMS
, usppms
);
1198 case USB_DEVICE_U2_ENABLE
:
1199 dev_dbg(bdc
->dev
, "USB_DEVICE_U2_ENABLE\n");
1201 if (bdc
->gadget
.speed
!= USB_SPEED_SUPER
||
1202 state
!= USB_STATE_CONFIGURED
)
1205 usppms
= bdc_readl(bdc
->regs
, BDC_USPPMS
);
1209 bdc
->devstatus
|= (1 << USB_DEV_STAT_U2_ENABLED
);
1213 bdc
->devstatus
&= ~(1 << USB_DEV_STAT_U2_ENABLED
);
1215 bdc_writel(bdc
->regs
, BDC_USPPMS
, usppms
);
1218 case USB_DEVICE_LTM_ENABLE
:
1219 dev_dbg(bdc
->dev
, "USB_DEVICE_LTM_ENABLE?\n");
1220 if (bdc
->gadget
.speed
!= USB_SPEED_SUPER
||
1221 state
!= USB_STATE_CONFIGURED
)
1225 dev_err(bdc
->dev
, "Unknown wValue:%d\n", wValue
);
1227 } /* USB_RECIP_DEVICE end */
/* SET/CLEAR FEATURE handler */
static int ep0_handle_feature(struct bdc *bdc,
			      struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wValue;
	u16 wIndex;
	int epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);

	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);

	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;
		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when SETUP packet
		 * is received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}
1308 static int ep0_handle_status(struct bdc
*bdc
,
1309 struct usb_ctrlrequest
*setup_pkt
)
1311 enum usb_device_state state
= bdc
->gadget
.state
;
1317 /* USB2.0 spec sec 9.4.5 */
1318 if (state
== USB_STATE_DEFAULT
)
1320 wIndex
= le16_to_cpu(setup_pkt
->wIndex
);
1321 dev_dbg(bdc
->dev
, "%s\n", __func__
);
1322 usb_status
= bdc
->devstatus
;
1323 switch (setup_pkt
->bRequestType
& USB_RECIP_MASK
) {
1324 case USB_RECIP_DEVICE
:
1326 "USB_RECIP_DEVICE devstatus:%08x\n",
1328 /* USB3 spec, sec 9.4.5 */
1329 if (bdc
->gadget
.speed
== USB_SPEED_SUPER
)
1330 usb_status
&= ~REMOTE_WAKE_ENABLE
;
1333 case USB_RECIP_INTERFACE
:
1334 dev_dbg(bdc
->dev
, "USB_RECIP_INTERFACE\n");
1335 if (bdc
->gadget
.speed
== USB_SPEED_SUPER
) {
1337 * This should come from func for Func remote wkup
1340 if (bdc
->devstatus
& REMOTE_WAKE_ENABLE
)
1341 usb_status
|= REMOTE_WAKE_ENABLE
;
1348 case USB_RECIP_ENDPOINT
:
1349 dev_dbg(bdc
->dev
, "USB_RECIP_ENDPOINT\n");
1350 epnum
= wIndex
& USB_ENDPOINT_NUMBER_MASK
;
1352 if ((wIndex
& USB_ENDPOINT_DIR_MASK
) == USB_DIR_IN
)
1353 epnum
= epnum
*2 + 1;
1357 epnum
= 1; /* EP0 */
1360 ep
= bdc
->bdc_ep_array
[epnum
];
1362 dev_err(bdc
->dev
, "ISSUE, GET_STATUS for invalid EP ?");
1365 if (ep
->flags
& BDC_EP_STALL
)
1366 usb_status
|= 1 << USB_ENDPOINT_HALT
;
1370 dev_err(bdc
->dev
, "Unknown recipient for get_status\n");
1373 /* prepare a data stage for GET_STATUS */
1374 dev_dbg(bdc
->dev
, "usb_status=%08x\n", usb_status
);
1375 *(__le16
*)bdc
->ep0_response_buff
= cpu_to_le16(usb_status
);
1376 bdc
->ep0_req
.usb_req
.length
= 2;
1377 bdc
->ep0_req
.usb_req
.buf
= &bdc
->ep0_response_buff
;
1378 ep0_queue_data_stage(bdc
);
static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
	/* ep0_set_sel_cmpl */
}
/* Queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
			struct usb_ctrlrequest *setup_pkt)
{
	struct bdc_ep *ep;
	u16 wLength;
	u16 wValue;

	dev_dbg(bdc->dev, "%s\n", __func__);
	wValue = le16_to_cpu(setup_pkt->wValue);
	wLength = le16_to_cpu(setup_pkt->wLength);
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}
/*
 * Queue a 0 byte bd: if wLength is more than the length and the length is
 * a multiple of MaxPacket, then queue a 0 byte BD
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;
	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (ret) {
		dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
		return ret;
	}
	bdc->ep0_state = WAIT_FOR_DATA_XMIT;

	return 0;
}
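/*
 * Example of when this runs: the host asks for wLength = 255 bytes of a
 * descriptor that is exactly 64 bytes, a multiple of the ep0 maxpacket;
 * setup_first_bd_ep0() sets zlp_needed, and the babble handler in
 * bdc_sr_xsf() queues this 0-byte bd to terminate the data stage.
 */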
/* Control request handler */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
							USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
				"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		delegate_setup = 1;
	}

	if (delegate_setup) {
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}
/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we receive ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if ZLP was queued? */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if data stage is in progress ? */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * For a 2 stage setup, only process the setup here; for a 3 stage
	 * setup the data stage is already handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		bdc->ep0_state = WAIT_FOR_SETUP;
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}
/* xfr completion status report handler */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
								__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
					__func__, sr_status, ep->name);

	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint");
			return;
		}
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
							bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength >Data Len,
			 * and Len is multiple of MaxPacket,then queue ZLP
			 */
			if (bdc->zlp_needed) {
				/* queue 0 length bd */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}
static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete || !_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	if (!ep->usb_ep.desc) {
		dev_err(bdc->dev,
			"trying to queue req %p to disabled %s\n",
			_req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_err(bdc->dev,
			"req length > supported MAX:%d requested:%d\n",
			MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				  struct usb_request *_req)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->usb_req == _req)
			break;
	}
	if (&req->usb_req != _req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req != req\n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);

err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = ep_set_halt(ep, value);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						     gfp_t gfp_flags)
{
	struct bdc_req *req;
	struct bdc_ep *ep;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	ep = to_bdc_ep(_ep);
	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}
static void bdc_gadget_free_request(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct bdc_req *req;

	req = to_bdc_req(_req);
	kfree(req);
}
/* endpoint operations */

/* configure endpoint and also allocate resources */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("bdc_gadget_ep_enable invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver
	    || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
		return -ESHUTDOWN;
	}

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}
	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}
	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	if (!(ep->flags & BDC_EP_ENABLED)) {
		dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
		return 0;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};
/* dir = 1 is IN */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	/* ep->ep_num is the index inside bdc_ep */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.max_streams = 0;
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}
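/*
 * Example of the ep_num mapping above: ep0 occupies index 1; for
 * epnum = 2, "ep1out" gets ep_num = 2 and "ep1in" gets ep_num = 3, so OUT
 * endpoints take even indices and IN endpoints odd ones in bdc_ep_array.
 */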
/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
	u8 epnum;
	int ret;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	INIT_LIST_HEAD(&bdc->gadget.ep_list);
	/* init ep0 */
	ret = init_ep(bdc, 1, 0);
	if (ret) {
		dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
		return ret;
	}

	for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
		/* OUT */
		ret = init_ep(bdc, epnum, 0);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}

		/* IN */
		ret = init_ep(bdc, epnum, 1);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}
	}

	return 0;
}