/*
 * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 *
 * Based on drivers under drivers/usb/
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>

#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"
static const char * const ep0_state_string[] =  {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING"
};
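/*
 * Rough sketch of the ep0 state machine, derived from how the handlers
 * in this file move ep0_state (see bdc_xsf_ep0_setup_recv(),
 * bdc_xsf_ep0_status_start() and ep0_xsf_complete() below):
 *
 *   WAIT_FOR_SETUP --setup rcvd, wLength != 0--> WAIT_FOR_DATA_START
 *                  --setup rcvd, wLength == 0--> WAIT_FOR_STATUS_START
 *   WAIT_FOR_DATA_START --data BD queued--> WAIT_FOR_DATA_XMIT
 *   WAIT_FOR_DATA_XMIT --xfr complete--> WAIT_FOR_STATUS_START
 *   WAIT_FOR_STATUS_START --status BD queued--> WAIT_FOR_STATUS_XMIT
 *   WAIT_FOR_STATUS_XMIT --xfr complete--> WAIT_FOR_SETUP
 *
 * STATUS_PENDING covers the case where the status stage SR arrives
 * while the data stage is still transmitting; the status BD is then
 * queued from ep0_xsf_complete().
 */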
/* Free the bdl during ep disable */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
				__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * check if the bd_table struct is allocated ?
		 * if yes, then check if bd memory has been allocated, then
		 * free the dma_pool and also the bd_table struct memory
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}
		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}

		dev_dbg(bdc->dev,
			"Free dma pool start_bd:%p dma:%llx\n",
			bd_table->start_bd,
			(unsigned long long)bd_table->dma);

		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array */
	kfree(ep->bd_list.bd_table_array);
}
/*
 * chain the tables, by inserting a chain bd at the end of prev_table,
 * pointing to next_table
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	/* Chain the prev table to next table */
	prev_table->start_bd[bd_p_tab-1].offset[0] =
			cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[1] =
			cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[2] =
			0x0;

	prev_table->start_bd[bd_p_tab-1].offset[3] =
			cpu_to_le32(MARK_CHAIN_BD);
}
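/*
 * Resulting layout (illustrative, assuming 3 tables): each table holds
 * bd_p_tab descriptors, the last descriptor of every table is a chain
 * BD (MARK_CHAIN_BD) carrying the dma address of the next table, and
 * the last table chains back to the first, forming a ring:
 *
 *   table0: [bd0 ... bdN-2 | chain -> table1]
 *   table1: [bd0 ... bdN-2 | chain -> table2]
 *   table2: [bd0 ... bdN-2 | chain -> table0]
 */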
/* Allocate the bdl for ep, during config ep */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	dma_addr_t dma;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kzalloc(
					num_tabs * sizeof(struct bd_table *),
					GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
							GFP_ATOMIC,
							&dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}

		bd_table->dma = dma;
		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		memset(bd_table->start_bd, 0, bd_p_tab * sizeof(struct bdc_bd));
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}
/* returns how many bd's are needed for this transfer */
static inline int bd_needed_req(struct bdc_req *req)
{
	int bd_needed = 0;
	int remaining;

	/* 1 bd needed for 0 byte transfer */
	if (req->usb_req.length == 0)
		return 1;

	/* remaining bytes after transferring all max BD size BD's */
	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
	if (remaining)
		bd_needed++;

	/* How many maximum BUFF size BD's ? */
	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
	bd_needed += remaining;

	return bd_needed;
}
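/*
 * Worked example (hypothetical numbers, assuming BD_MAX_BUFF_SIZE of
 * 64K): a 200KB request needs 200KB/64K = 3 full-size BDs plus 1 BD
 * for the remaining 8KB, so bd_needed_req() returns 4; a 0 byte
 * request still returns 1.
 */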
/* returns the bd index(bdi) corresponding to bd dma address */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find in which table this bd_dma_addr belongs?, go through the table
	 * array and compare addresses of first and last address of bd of each
	 * table
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
				(unsigned long long)dma_first_bd,
				(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}
/* returns the table index(tbi) of the given bdi */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int tbi;

	tbi = bdi / ep->bd_list.num_bds_table;
	dev_vdbg(ep->bdc->dev,
		"bdi:%d num_bds_table:%d tbi:%d\n",
		bdi, ep->bd_list.num_bds_table, tbi);

	return tbi;
}
/* Find the bdi of the last bd in the transfer */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}
/*
 * How many transfer bd's are available on this ep bdl, chain bds are not
 * counted in available bds
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where eqp and dqp pointers are, calculate number
	 * of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there is any chain bd in between */
			if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
					== bdi_to_tbi(ep, bd_list->eqp_bdi))) {
				available_bd = available1 - 1;
			} else {
				available_bd = available1;
			}
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if ring is full or empty so
	 * reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}
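/*
 * Worked example (illustrative, assuming 2 tables of 32 BDs each, so
 * max_bdi = 63): with eqp_bdi = 10 and hwd_bdi = 50, the gap is
 * available1 = 40, which spans one chain BD (40/32 = 1), so
 * available_bd = 40 - 1 - 1(reserved) = 38.
 */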
/* Notify the hardware after queueing the bd to bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have anyway to check if ep state is running,
	 * except the software flags.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}
/* returns the bd corresponding to bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi = 0;

	local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
	dev_vdbg(ep->bdc->dev,
		"%s bdi:%d local_bdi:%d\n",
		__func__, bdi, local_bdi);

	return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
}
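/*
 * Example of the bdi -> (table, local bd) mapping above (assuming 32
 * BDs per table): global bdi 40 gives tbi = 40/32 = 1 and
 * local_bdi = 40 - 32 = 8, i.e. the 9th bd of the second table.
 */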
/* Advance the enqueue pointer */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;
	/* if it's chain bd, then move to next */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}
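/*
 * Example (assuming 32 BDs per table): advancing from eqp_bdi 30 gives
 * 31, but (31 + 1) % 32 == 0 marks slot 31 as the table's chain BD, so
 * the enqueue pointer skips to 32; at max_bdi + 1 it wraps back to 0.
 */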
/* Setup the first bd for ep0 transfer */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req->ep->dir = 0;
	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}
/* Setup the bd dma descriptor for a given request */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	ep = req->ep;
	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs/maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
			/* format of first bd for ep0 is different than other */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (!req->ep->dir)
			dword3 |= BD_ISP;

		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}
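/*
 * Summary of the per-BD dword layout written above, as used in this
 * file: offset[0]/offset[1] hold the lower/upper 32 bits of the buffer
 * dma address, offset[2] carries the buffer length plus the interrupt
 * target, and offset[3] carries the control bits (BD_SOT, BD_SBF,
 * BD_IOC, BD_EOT, plus the transfer type and direction for ep0).
 */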
/* Queue the xfr */
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	int num_bds, bd_available;
	struct bdc_ep *ep;
	int ret;

	ep = req->ep;
	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	num_bds = bd_needed_req(req);
	bd_available = bd_available_ep(ep);

	/* how many bd's are available on ep */
	if (num_bds > bd_available)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, num_bds);
	if (ret)
		return ret;

	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}
/* callback to gadget layer when xfr completes */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}
/* Disable the endpoint */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop, it can fail in
	 * disconnect scenarios, continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free");

	ep->flags = 0;
	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;
	ep->ep_type = 0;

	return ret;
}
/* Enable the ep */
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc;
	int ret = 0;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
					__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return -ENOMEM;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	ep->flags |= BDC_EP_ENABLED;
	if (ep->ep_num == 1)
		return ret;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}
/* EP0 related code */

/* Queue a status stage BD */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;
	bdc_queue_xfr(bdc, status_req);

	return 0;
}
/* Queue xfr on ep0 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* if status stage was delayed? */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
			return 0;
		}
	} else {
		/*
		 * if delayed status is false and a 0 length transfer is
		 * requested, i.e. for the status stage of some setup request,
		 * then just return from here; the status stage is queued
		 * independently
		 */
		if (req->usb_req.length == 0)
			return 0;

	}
	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep, &bdc->ep0_req);
}
/* Queue req on ep */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret = 0;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;

	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}
/* Dequeue a request from ep */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64 deq_ptr_64 = 0;
	struct bdc *bdc;
	u32 tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;

	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
					__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
						ep, (void *)ep->usb_ep.desc);
	/* Stop the ep to see where the HW is ? */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/* if there is an issue with stopping ep, then no need to go further */
	if (ret)
		return 0;

	/*
	 * After endpoint is stopped, there can be 3 cases, the request
	 * is processed, pending or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
			end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If both start and end are processed, we cannot deq req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;

	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma = table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to HW limitation we need to bypass chain bd's and issue ep_bla;
	 * in case start is pending and this is the first request in the list,
	 * then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_hwd_bdi
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
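/*
 * In short, ep_dequeue() stops the ep, reads back the HW dequeue
 * pointer, and then either does nothing (the request was already
 * processed), converts the first BD of a still-pending request into a
 * chain BD that jumps over it, or issues ep_bla to move the HW dequeue
 * pointer when the transfer was stopped mid-way or the request is the
 * first one on the queue.
 */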
/* Halt/Clear the ep based on value */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;
		dev_dbg(bdc->dev, "After Clear\n");
	}

	return ret;
}
/* Free all the ep */
void bdc_free_ep(struct bdc *bdc)
{
	struct bdc_ep *ep;
	u8 epnum;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (epnum = 1; epnum < bdc->num_eps; epnum++) {
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			continue;

		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);

		/* ep0 is not in this gadget list */
		if (epnum != 1)
			list_del(&ep->usb_ep.ep_list);

		kfree(ep);
	}
}
/* USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->test_mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		usb2_pm |= bdc->test_mode << 28;
		break;
	default:
		return -EINVAL;
	}
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}
/*
 * Helper function to handle Transfer status report with status as either
 * success or short
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
					struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);

	/* do not process this sr if ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);

	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * If sr_status is short and this transfer has more than 1 bd then it
	 * needs special handling; this is only applicable for bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is multi bd xfr, lets see which bd
		 * caused short transfer and how many bytes have been
		 * transferred so far.
		 */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi = bd_xfr->start_bdi;
		/*
		 * We know the start_bdi and short_bdi, how many xfr
		 * bds in between
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (!(bdi_to_tbi(ep, start_bdi) ==
						bdi_to_tbi(ep, short_bdi)))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		if (!(end_bdi == short_bdi))
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		/* length queued on the short bd */
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}
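/*
 * Short-transfer accounting sketch for the multi-BD case above: the SR
 * dequeue pointer identifies the BD that terminated short (short_bdi);
 * every transfer BD between start_bdi and short_bdi (chain BDs
 * excluded) contributed BD_MAX_BUFF_SIZE bytes, and the short BD
 * contributed its queued length minus the residue reported in
 * SR_BD_LEN, which together give usb_req.actual.
 */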
/* EP0 setup related packet handlers */

/*
 * Setup packet received, just store the packet and process on next DS or SS
 * started SR
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	/* Store received setup packet */
	setup_pkt = &bdc->setup_pkt;
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
	len = le16_to_cpu(setup_pkt->wLength);
	if (!len)
		bdc->ep0_state = WAIT_FOR_STATUS_START;
	else
		bdc->ep0_state = WAIT_FOR_DATA_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}
/* Stall ep0 */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	struct bdc_req *req;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}
/* SET_ADD handlers */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	int ret = 0;
	u32 addr;

	addr = le16_to_cpu(ctrl->wValue);
	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	if (addr > 127)
		return -EINVAL;

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue Address device command */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		if (addr)
			usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);

		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			"SET Address in wrong device state %d\n",
			state);
		ret = -EINVAL;
	}

	return ret;
}
/* Handler for SET/CLEAR FEATURE requests for device */
static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
					u16 wIndex, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 usppms = 0;

	dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
					__func__, set, state);
	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
		if (set)
			bdc->devstatus |= REMOTE_WAKE_ENABLE;
		else
			bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_DEVICE_TEST_MODE:
		dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
		if ((wIndex & 0xFF) ||
				(bdc->gadget.speed != USB_SPEED_HIGH) || !set)
			return -EINVAL;

		bdc->test_mode = wIndex >> 8;
		break;

	case USB_DEVICE_U1_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			/* clear previous u1t */
			usppms &= ~BDC_U1T(BDC_U1T_MASK);
			usppms |= BDC_U1T(U1_TIMEOUT);
			usppms |= BDC_U1E | BDC_PORT_W1S;
			bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
		} else {
			usppms &= ~BDC_U1E;
			usppms |= BDC_PORT_W1S;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_U2_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			usppms |= BDC_U2E;
			usppms |= BDC_U2A;
			bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
		} else {
			usppms &= ~BDC_U2E;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_LTM_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;
		break;
	default:
		dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
		return -EOPNOTSUPP;
	} /* USB_RECIP_DEVICE end */

	return 0;
}
/* SET/CLEAR FEATURE handler */
static int ep0_handle_feature(struct bdc *bdc,
			      struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wValue;
	u16 wIndex;
	int epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);

	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);

	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;

		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when SETUP packet
		 * is received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}
/* GET_STATUS request handler */
static int ep0_handle_status(struct bdc *bdc,
			     struct usb_ctrlrequest *setup_pkt)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 usb_status = 0;
	u32 epnum;
	u16 wIndex;

	/* USB2.0 spec sec 9.4.5 */
	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wIndex = le16_to_cpu(setup_pkt->wIndex);
	dev_dbg(bdc->dev, "%s\n", __func__);
	usb_status = bdc->devstatus;
	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		dev_dbg(bdc->dev,
			"USB_RECIP_DEVICE devstatus:%08x\n",
			bdc->devstatus);
		/* USB3 spec, sec 9.4.5 */
		if (bdc->gadget.speed == USB_SPEED_SUPER)
			usb_status &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		if (bdc->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * This should come from func for Func remote wkup
			 * usb_status |=1;
			 */
			if (bdc->devstatus & REMOTE_WAKE_ENABLE)
				usb_status |= REMOTE_WAKE_ENABLE;
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum*2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}

		ep = bdc->bdc_ep_array[epnum];
		if (!ep) {
			dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?");
			return -EINVAL;
		}
		if (ep->flags & BDC_EP_STALL)
			usb_status |= 1 << USB_ENDPOINT_HALT;

		break;
	default:
		dev_err(bdc->dev, "Unknown recipient for get_status\n");
		return -EINVAL;
	}
	/* prepare a data stage for GET_STATUS */
	dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
	*(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
	bdc->ep0_req.usb_req.length = 2;
	bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
	ep0_queue_data_stage(bdc);

	return 0;
}
static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
	/* ep0_set_sel_cmpl */
}

/* Queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
			     struct usb_ctrlrequest *setup_pkt)
{
	struct bdc_ep *ep;
	u16 wLength;
	u16 wValue;

	dev_dbg(bdc->dev, "%s\n", __func__);
	wValue = le16_to_cpu(setup_pkt->wValue);
	wLength = le16_to_cpu(setup_pkt->wLength);
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}
/*
 * Queue a 0 byte bd only if wLength is more than the length and length is
 * a multiple of MaxPacket, then queue 0 byte BD
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;
	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (ret) {
		dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
		return ret;
	}
	bdc->ep0_state = WAIT_FOR_DATA_XMIT;

	return 0;
}
/* Control request handler */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
							USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
			"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		delegate_setup = 1;
	}

	if (delegate_setup) {
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}
/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we receive ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if ZLP was queued? */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if data stage is in progress ? */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * 2 stage setup then only process the setup, for 3 stage setup the
	 * data stage is already handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		bdc->ep0_state = WAIT_FOR_SETUP;
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}
/* xfr completion status report handler */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
								__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
					__func__, sr_status, ep->name);

	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint");
			return;
		}
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
							bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength >Data Len,
			 * and Len is multiple of MaxPacket,then queue ZLP
			 */
			if (bdc->zlp_needed) {
				/* queue 0 length bd */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}
static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete || !_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	if (!ep->usb_ep.desc) {
		dev_err(bdc->dev,
			"trying to queue req %p to disabled %s\n",
			_req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_err(bdc->dev,
			"req length > supported MAX:%d requested:%d\n",
			MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				  struct usb_request *_req)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->usb_req == _req)
			break;
	}
	if (&req->usb_req != _req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req != req\n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);

err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = ep_set_halt(ep, value);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						     gfp_t gfp_flags)
{
	struct bdc_req *req;
	struct bdc_ep *ep;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	ep = to_bdc_ep(_ep);
	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}
static void bdc_gadget_free_request(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct bdc_req *req;

	req = to_bdc_req(_req);
	kfree(req);
}
/* endpoint operations */

/* configure endpoint and also allocate resources */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("bdc_gadget_ep_enable invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver
	    || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
		return -ESHUTDOWN;
	}

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}
	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}
	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	if (!(ep->flags & BDC_EP_ENABLED)) {
		dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
		return 0;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};
/* init ep structs, called from bdc_init_ep() */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	if (dir)
		ep->usb_ep.caps.dir_in = true;
	else
		ep->usb_ep.caps.dir_out = true;

	/* ep->ep_num is the index inside bdc_ep */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->usb_ep.caps.type_control = true;
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.caps.type_iso = true;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		ep->usb_ep.max_streams = 0;
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->flags = 0;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}
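/*
 * Example of the ep_num indexing used above: ep0 occupies index 1;
 * for epnum 2, init_ep(bdc, 2, 0) creates "ep1out" at index 2 and
 * init_ep(bdc, 2, 1) creates "ep1in" at index 3 (OUT = epnum*2 - 2,
 * IN = epnum*2 - 1).
 */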
/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
	u8 epnum;
	int ret;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	INIT_LIST_HEAD(&bdc->gadget.ep_list);
	/* init ep0 */
	ret = init_ep(bdc, 1, 0);
	if (ret) {
		dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
		return ret;
	}

	for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
		/* OUT */
		ret = init_ep(bdc, epnum, 0);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}

		/* IN */
		ret = init_ep(bdc, epnum, 1);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}
	}

	return 0;
}