// SPDX-License-Identifier: GPL-2.0+
/*
 * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 *
 * Based on drivers under drivers/usb/
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>

#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"
static const char * const ep0_state_string[] = {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING",
};
/* Free the bdl during ep disable */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
				__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * check if the bd_table struct is allocated ?
		 * if yes, then check if bd memory has been allocated, then
		 * free the dma_pool and also the bd_table struct memory
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}

		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}

		dev_dbg(bdc->dev,
			"Free dma pool start_bd:%p dma:%llx\n",
			bd_table->start_bd,
			(unsigned long long)bd_table->dma);

		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array */
	kfree(ep->bd_list.bd_table_array);
}
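/*
 * Layout sketch of the bd list (bdl) built below: an array of num_tabs
 * tables, each holding num_bds_table descriptors allocated from the dma
 * pool. The last bd of every table is a chain bd pointing at the next
 * table's dma address, and the last table chains back to table 0, so the
 * hardware sees one circular ring (the table counts and sizes come from
 * NUM_TABLES, NUM_TABLES_ISOCH and NUM_BDS_PER_TABLE in the bdc headers).
 */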
/*
 * chain the tables, by inserting a chain bd at the end of prev_table,
 * pointing to next_table
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	/* Chain the prev table to next table */
	prev_table->start_bd[bd_p_tab-1].offset[0] =
			cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[1] =
			cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[2] = 0x0;

	prev_table->start_bd[bd_p_tab-1].offset[3] =
			cpu_to_le32(MARK_CHAIN_BD);
}
/* Allocate the bdl for ep, during config ep */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	dma_addr_t dma;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kcalloc(num_tabs,
					sizeof(struct bd_table *),
					GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_zalloc(bdc->bd_table_pool,
							GFP_ATOMIC,
							&dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}

		bd_table->dma = dma;
		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}
/* returns how many bd's are needed for this transfer */
static inline int bd_needed_req(struct bdc_req *req)
{
	int bd_needed = 0;
	int remaining;

	/* 1 bd needed for 0 byte transfer */
	if (req->usb_req.length == 0)
		return 1;

	/* remaining bytes after transferring all max BD size BD's */
	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
	if (remaining)
		bd_needed++;

	/* How many maximum BUFF size BD's ? */
	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
	bd_needed += remaining;

	return bd_needed;
}
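/*
 * Worked example of the bd count above, with illustrative numbers: a
 * request of length 2.5 * BD_MAX_BUFF_SIZE yields 2 from the division
 * plus 1 for the non-zero remainder, so bd_needed_req() returns 3; a
 * zero-length request still consumes exactly 1 bd.
 */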
/* returns the bd index(bdi) corresponding to bd dma address */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find in which table this bd_dma_addr belongs?, go through the table
	 * array and compare addresses of first and last address of bd of each
	 * table
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
					(unsigned long long)dma_first_bd,
					(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}
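/*
 * Example of the index math above, using num_bds_table = 32 purely as an
 * illustration (the real value is NUM_BDS_PER_TABLE): a dma address that
 * is 3 descriptors into table 2 gives a local bdi of 3 and tbi of 2, so
 * the returned global bdi is 3 + 2 * 32 = 67.
 */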
/* returns the table index(tbi) of the given bdi */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int tbi;

	tbi = bdi / ep->bd_list.num_bds_table;
	dev_vdbg(ep->bdc->dev,
		"bdi:%d num_bds_table:%d tbi:%d\n",
		bdi, ep->bd_list.num_bds_table, tbi);

	return tbi;
}
/* Find the bdi of the last bd in the transfer */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}
/*
 * How many transfer bd's are available on this ep bdl, chain bds are not
 * counted in available bds
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where eqp and dqp pointers are, calculate number
	 * of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there is any chain bd in between */
			if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
					== bdi_to_tbi(ep, bd_list->eqp_bdi))) {
				available_bd = available1 - 1;
			} else {
				available_bd = available1;
			}
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if ring is full or empty so
	 * reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}
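/*
 * Worked example for the accounting above, with an illustrative ring of
 * 2 tables * 32 bds (max_bdi = 63): if eqp_bdi = 40 and hwd_bdi = 10 the
 * free space wraps, so available1 = 63 - 40 = 23 and available2 = 10;
 * chain_bd1 = 23/32 and chain_bd2 = 10/32 are both 0, and reserving the
 * one bd that distinguishes a full ring from an empty one leaves
 * 23 + 10 - 1 = 32 usable bds.
 */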
/* Notify the hardware after queueing the bd to bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have anyway to check if ep state is running,
	 * except the software flags.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}
/* returns the bd corresponding to bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi = 0;

	local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
	dev_vdbg(ep->bdc->dev,
		"%s bdi:%d local_bdi:%d\n",
		__func__, bdi, local_bdi);

	return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
}
/* Advance the enqueue pointer */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;
	/* if it's chain bd, then move to next */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}
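/*
 * Trace of the advance above (num_bds_table = 32 used as an example):
 * from eqp_bdi = 30 the increment lands on 31, the chain bd slot of the
 * table ((31 + 1) % 32 == 0), so the pointer is bumped once more to 32,
 * the first bd of the next table; past max_bdi it wraps back to 0.
 */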
/* Setup the first bd for ep0 transfer */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req->ep->dir = 0;
	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}
/* Setup the bd dma descriptor for a given request */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep = req->ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc);
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs/maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
			dword2 |= BD_LTF;
			/* format of first bd for ep0 is different than other */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (!req->ep->dir)
			dword3 |= BD_ISP;

		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}
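/*
 * For reference, the bd layout written above: offset[0] and offset[1]
 * carry the low and high halves of the buffer dma address, offset[2]
 * carries the buffer length plus the interrupt target, and offset[3]
 * carries the control flags (SOT/EOT/IOC/ISP/SBF, the transfer-size
 * field of the first bd, and the ep0 type/direction bits).
 */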
/* Queue the xfr */
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	int num_bds, bd_available;
	struct bdc_ep *ep;
	int ret;

	ep = req->ep;
	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	num_bds = bd_needed_req(req);
	bd_available = bd_available_ep(ep);

	/* how many bd's are available on ep */
	if (num_bds > bd_available)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, num_bds);
	if (ret)
		return ret;
	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}
/* callback to gadget layer when xfr completes */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}
/* Disable the endpoint */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	ret = 0;
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop, it can fail in
	 * disconnect scenarios, continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free");

	ep->flags = 0;
	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;
	ep->ep_type = 0;

	return ret;
}
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc;
	int ret = 0;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
					__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return ret;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	ep->flags |= BDC_EP_ENABLED;
	if (ep->ep_num == 1)
		return ret;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}
/* EP0 related code */

/* Queue a status stage BD */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;
	bdc_queue_xfr(bdc, status_req);

	return 0;
}
/* Queue xfr on ep0 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* if status stage was delayed? */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		}
	} else {
		/*
		 * if delayed status is false and 0 length transfer is
		 * requested, i.e. for status stage of some setup request,
		 * then just return from here; the status stage is queued
		 * independently
		 */
		if (req->usb_req.length == 0)
			return 0;
	}
	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep, &bdc->ep0_req);
}
/* Queue req on ep */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret = 0;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;

	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Dequeue a request from ep */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64 deq_ptr_64 = 0;
	struct bdc *bdc;
	u32 tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;

	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
					__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
				ep, (void *)ep->usb_ep.desc);
	/* Stop the ep to see where the HW is ? */
	ret = bdc_stop_ep(bdc, ep->ep_num);
	/* if there is an issue with stopping ep, then no need to go further */
	if (ret)
		return 0;

	/*
	 * After endpoint is stopped, there can be 3 cases, the request
	 * is processed, pending or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0);
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1);
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
			end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If both start and end are processed, we cannot deq req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;
	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma = table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to HW limitation we need to bypass chain bd's and issue ep_bla;
	 * in case start is pending and this is the first request in the list
	 * then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_bd_dma
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
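/*
 * Summary of the dequeue strategy above, for reference: after stopping
 * the ep, a request wholly behind the HW dequeue pointer is already
 * processed and cannot be dequeued; if its start bd is still HW-owned
 * (and it is not the first request in the queue) the start bd is
 * rewritten as a chain bd that skips the whole transfer; if only its
 * tail is pending, ep_bla moves the HW dequeue pointer past it.
 */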
/* Halt/Clear the ep based on value */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;
		dev_dbg(bdc->dev, "After Clear\n");
	}

	return ret;
}
/* Free all the ep */
void bdc_free_ep(struct bdc *bdc)
{
	struct bdc_ep *ep;
	u8 epnum;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (epnum = 1; epnum < bdc->num_eps; epnum++) {
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			continue;

		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);

		/* ep0 is not in this gadget list */
		if (epnum != 1)
			list_del(&ep->usb_ep.ep_list);

		kfree(ep);
	}
}
/* USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->test_mode) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		usb2_pm |= bdc->test_mode << 28;
		break;
	default:
		return -EINVAL;
	}
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}
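/*
 * Note: the test selector is written into the PTC field of the USPPM2
 * register (the bits masked by BDC_PTC_MASK, hence the << 28 shift
 * above); the selector values themselves are the standard ones from the
 * USB2 spec, section 7.1.20.
 */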
/*
 * Helper function to handle Transfer status report with status as either
 * success or short
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
						struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
	bdc_dbg_srr(bdc, 0);
	/* do not process this sr if ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);

	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * if the sr_status is short and this transfer has more than 1 bd
	 * then it needs special handling; this is only applicable for
	 * bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is multi bd xfr, lets see which bd
		 * caused short transfer and how many bytes have been
		 * transferred so far.
		 */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi = bd_xfr->start_bdi;
		/*
		 * We know the start_bdi and short_bdi, how many xfr
		 * bds in between
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (!(bdi_to_tbi(ep, start_bdi) ==
						bdi_to_tbi(ep, short_bdi)))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		if (!(end_bdi == short_bdi))
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		/* length queued on the short bd */
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}
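/*
 * Worked example of the short-transfer math above, with illustrative
 * values: a 3-bd transfer starting at start_bdi = 0 that the HW reports
 * short at short_bdi = 2, with both bds in the same table, gives
 * max_len_bds = 2, so actual = 2 * BD_MAX_BUFF_SIZE plus however much of
 * the short bd's programmed length (offset[2] & 0x1FFFFF) was actually
 * transferred before the residue reported in the status report.
 */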
/* EP0 setup related packet handlers */

/*
 * Setup packet received, just store the packet and process on next DS or SS
 * started SR
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	/* Store received setup packet */
	setup_pkt = &bdc->setup_pkt;
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
	len = le16_to_cpu(setup_pkt->wLength);
	if (!len)
		bdc->ep0_state = WAIT_FOR_STATUS_START;
	else
		bdc->ep0_state = WAIT_FOR_DATA_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}
/* Stall ep0 and dequeue any pending requests */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	struct bdc_req *req;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}
/* SET_ADD handlers */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	int ret = 0;
	u32 addr;

	addr = le16_to_cpu(ctrl->wValue);
	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	if (addr > 127)
		return -EINVAL;

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue Address device command */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		if (addr)
			usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);

		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			"SET Address in wrong device state %d\n",
			state);
		ret = -EINVAL;
	}

	return ret;
}
/* Handler for SET/CLEAR FEATURE requests for device */
static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
					u16 wIndex, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 usppms = 0;

	dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
					__func__, set, state);
	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
		if (set)
			bdc->devstatus |= REMOTE_WAKE_ENABLE;
		else
			bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_DEVICE_TEST_MODE:
		dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
		if ((wIndex & 0xFF) ||
				(bdc->gadget.speed != USB_SPEED_HIGH) || !set)
			return -EINVAL;

		bdc->test_mode = wIndex >> 8;
		break;

	case USB_DEVICE_U1_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			/* clear previous u1t */
			usppms &= ~BDC_U1T(BDC_U1T_MASK);
			usppms |= BDC_U1T(U1_TIMEOUT);
			usppms |= BDC_U1E | BDC_PORT_W1S;
			bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
		} else {
			usppms &= ~BDC_U1E;
			usppms |= BDC_PORT_W1S;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_U2_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			usppms |= BDC_U2E;
			usppms |= BDC_U2A;
			bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
		} else {
			usppms &= ~BDC_U2E;
			usppms &= ~BDC_U2A;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_LTM_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;
		break;
	default:
		dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
		return -EOPNOTSUPP;
	} /* USB_RECIP_DEVICE end */

	return 0;
}
/* SET/CLEAR FEATURE handler */
static int ep0_handle_feature(struct bdc *bdc,
			struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wValue;
	u16 wIndex;
	int epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);

	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);

	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;
		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when SETUP packet
		 * is received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}
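/*
 * Example of the endpoint index math above: USB endpoint address N maps
 * to bdc_ep_array[N*2] for OUT and bdc_ep_array[N*2+1] for IN, with
 * index 1 reserved for ep0; e.g. a CLEAR_FEATURE(ENDPOINT_HALT) on
 * ep2-IN resolves to bdc_ep_array[5].
 */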
/* GET_STATUS request handler */
static int ep0_handle_status(struct bdc *bdc,
			struct usb_ctrlrequest *setup_pkt)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 usb_status = 0;
	u32 epnum;
	u16 wIndex;

	/* USB2.0 spec sec 9.4.5 */
	if (state == USB_STATE_DEFAULT)
		return -EINVAL;
	wIndex = le16_to_cpu(setup_pkt->wIndex);
	dev_dbg(bdc->dev, "%s\n", __func__);
	usb_status = bdc->devstatus;
	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		dev_dbg(bdc->dev,
			"USB_RECIP_DEVICE devstatus:%08x\n",
			bdc->devstatus);
		/* USB3 spec, sec 9.4.5 */
		if (bdc->gadget.speed == USB_SPEED_SUPER)
			usb_status &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		if (bdc->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * This should come from func for Func remote wkup
			 * usb_status |=1;
			 */
			if (bdc->devstatus & REMOTE_WAKE_ENABLE)
				usb_status |= REMOTE_WAKE_ENABLE;
		} else {
			usb_status = 0;
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum*2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}

		ep = bdc->bdc_ep_array[epnum];
		if (!ep) {
			dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?");
			return -EINVAL;
		}
		if (ep->flags & BDC_EP_STALL)
			usb_status |= 1 << USB_ENDPOINT_HALT;

		break;
	default:
		dev_err(bdc->dev, "Unknown recipient for get_status\n");
		return -EINVAL;
	}
	/* prepare a data stage for GET_STATUS */
	dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
	*(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
	bdc->ep0_req.usb_req.length = 2;
	bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
	ep0_queue_data_stage(bdc);

	return 0;
}
static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
	/* ep0_set_sel_cmpl */
}
/* Queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
			struct usb_ctrlrequest *setup_pkt)
{
	struct bdc_ep *ep;
	u16 wLength;

	dev_dbg(bdc->dev, "%s\n", __func__);
	wLength = le16_to_cpu(setup_pkt->wLength);
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}
/*
 * Queue a 0 byte bd only if wLength is more than the actual data length
 * and the length is a multiple of MaxPacket
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;
	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (ret) {
		dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
		return ret;
	}
	bdc->ep0_state = WAIT_FOR_DATA_XMIT;

	return 0;
}
/* Control request handler */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
							USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
				"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		delegate_setup = 1;
	}

	if (delegate_setup) {
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}
/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we receive ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if ZLP was queued? */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if data stage is in progress ? */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * 2 stage setup then only process the setup, for 3 stage setup the
	 * data stage is already handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		bdc->ep0_state = WAIT_FOR_SETUP;
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}
/* xfr completion status report handler */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
								__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
					__func__, sr_status, ep->name);

	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint");
			return;
		}
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
							bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength > Data
			 * Len, and Len is a multiple of MaxPacket, then
			 * queue ZLP
			 */
			if (bdc->zlp_needed) {
				/* queue 0 length bd */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}
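/*
 * Note on the dispatch above: sr_xsf_ep0[] holds the three ep0 handlers
 * from this file, indexed in the same order as the XSF_SETUP_RECV,
 * XSF_DATA_START and XSF_STATUS_START status codes; the array itself is
 * presumably populated during controller setup elsewhere in the driver.
 */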
static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete || !_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	if (!ep->usb_ep.desc) {
		dev_warn(bdc->dev,
			"trying to queue req %p to disabled %s\n",
			_req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_warn(bdc->dev,
			"req length > supported MAX:%d requested:%d\n",
			MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				struct usb_request *_req)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->usb_req == _req)
			break;
	}
	if (&req->usb_req != _req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req != req\n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);

err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = ep_set_halt(ep, value);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct bdc_req *req;
	struct bdc_ep *ep;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	ep = to_bdc_ep(_ep);
	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}
static void bdc_gadget_free_request(struct usb_ep *_ep,
				struct usb_request *_req)
{
	struct bdc_req *req;

	req = to_bdc_req(_req);
	kfree(req);
}
/* endpoint operations */

/* configure endpoint and also allocate resources */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("bdc_gadget_ep_enable invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver
			|| bdc->gadget.speed == USB_SPEED_UNKNOWN) {
		return -ESHUTDOWN;
	}

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}
	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}
	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	if (!(ep->flags & BDC_EP_ENABLED)) {
		dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
		return 0;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};
/* dir = 1 is IN */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	if (dir)
		ep->usb_ep.caps.dir_in = true;
	else
		ep->usb_ep.caps.dir_out = true;

	/* ep->ep_num is the index inside bdc_ep */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->usb_ep.caps.type_control = true;
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.caps.type_iso = true;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		ep->usb_ep.max_streams = 0;
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->flags = 0;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}
/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
	u8 epnum;
	int ret;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	INIT_LIST_HEAD(&bdc->gadget.ep_list);
	/* init ep0 */
	ret = init_ep(bdc, 1, 0);
	if (ret) {
		dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
		return ret;
	}

	for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
		/* OUT */
		ret = init_ep(bdc, epnum, 0);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}

		/* IN */
		ret = init_ep(bdc, epnum, 1);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}
	}

	return 0;
}