// SPDX-License-Identifier: GPL-2.0+
/*
 * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 *
 * Based on drivers under drivers/usb/
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>

#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"

static const char * const ep0_state_string[] = {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING",
};

/* Free the bdl during ep disable */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
				__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * check if the bd_table struct is allocated ?
		 * if yes, then check if bd memory has been allocated, then
		 * free the dma_pool and also the bd_table struct memory
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}
		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}
		dev_dbg(bdc->dev,
			"Free dma pool start_bd:%p dma:%llx\n",
			bd_table->start_bd,
			(unsigned long long)bd_table->dma);
		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array */
	kfree(ep->bd_list.bd_table_array);
}

/*
 * chain the tables, by inserting a chain bd at the end of prev_table,
 * pointing to next_table
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	/* Chain the prev table to next table */
	prev_table->start_bd[bd_p_tab-1].offset[0] =
			cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[1] =
			cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[2] = 0x0;

	prev_table->start_bd[bd_p_tab-1].offset[3] =
			cpu_to_le32(MARK_CHAIN_BD);
}
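
/*
 * The chain BD carries no data of its own: offset[0]/offset[1] hold the
 * low/high halves of the next table's DMA address and MARK_CHAIN_BD in
 * offset[3] tells the hardware to jump there instead of treating the
 * entry as a transfer BD.
 */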

/* Allocate the bdl for ep, during config ep */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	dma_addr_t dma;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kzalloc(
					num_tabs * sizeof(struct bd_table *),
					GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
							GFP_ATOMIC, &dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}
		bd_table->dma = dma;
		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		memset(bd_table->start_bd, 0, bd_p_tab * sizeof(struct bdc_bd));
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}

/* returns how many bd's are needed for this transfer */
static inline int bd_needed_req(struct bdc_req *req)
{
	int bd_needed = 0;
	int remaining;

	/* 1 bd needed for 0 byte transfer */
	if (req->usb_req.length == 0)
		return 1;

	/* remaining bytes after transferring all max BD size BD's */
	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
	if (remaining)
		bd_needed++;

	/* How many maximum BUFF size BD's ? */
	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
	bd_needed += remaining;

	return bd_needed;
}
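
/*
 * Example: a request of 2.5 * BD_MAX_BUFF_SIZE bytes needs 3 BDs - two
 * full-size BDs plus one BD for the remainder - while a request that is
 * an exact multiple of BD_MAX_BUFF_SIZE needs length/BD_MAX_BUFF_SIZE BDs.
 */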

/* returns the bd index(bdi) corresponding to bd dma address */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find in which table this bd_dma_addr belongs?, go through the table
	 * array and compare addresses of first and last address of bd of each
	 * table
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
				(unsigned long long)dma_first_bd,
				(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}

/* returns the table index(tbi) of the given bdi */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int tbi;

	tbi = bdi / ep->bd_list.num_bds_table;
	dev_vdbg(ep->bdc->dev,
		"bdi:%d num_bds_table:%d tbi:%d\n",
		bdi, ep->bd_list.num_bds_table, tbi);

	return tbi;
}

/* Find the bdi of the last bd in the transfer */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}
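
/*
 * find_end_bdi() backs up one BD from the next hardware dequeue index and
 * then compensates for two ring artifacts: a negative index wraps back to
 * the end of the ring, and an index that lands on a chain-BD boundary is
 * moved back once more, since a chain BD never carries transfer data.
 */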

/*
 * How many transfer bd's are available on this ep bdl, chain bds are not
 * counted in available bds
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where eqp and dqp pointers are, calculate number
	 * of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there is any chain bd in between */
			if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
					== bdi_to_tbi(ep, bd_list->eqp_bdi)))
				available_bd = available1 - 1;
			else
				available_bd = available1;
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if ring is full or empty so
	 * reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}
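
/*
 * For instance, with a single 32-BD table (max_bdi = 31, one chain BD) an
 * empty ring reports max_bdi - num_tabs = 31 - 1 = 30 usable BDs. In the
 * non-empty paths the final available_bd-- reserves one slot so that a
 * completely full ring can be told apart from an empty one.
 */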

/* Notify the hardware after queueing the bd to bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have any way to check if ep state is running,
	 * except the software flags.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}

/* returns the bd corresponding to bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi;

	local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
	dev_vdbg(ep->bdc->dev,
		"%s bdi:%d local_bdi:%d\n",
		__func__, bdi, local_bdi);

	return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
}

/* Advance the enqueue pointer */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;
	/* if it's chain bd, then move to next */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}
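
/*
 * With 32 BDs per table this advance skips indices 31, 63, 95, ... - the
 * chain-BD slots - and wraps from max_bdi back to 0, so eqp_bdi only ever
 * rests on BDs that can carry data.
 */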

/* Setup the first bd for ep0 transfer */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}
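
/*
 * A control transfer with wLength == 0 has no data stage, so the first BD
 * queued after SETUP is the status-stage BD (BD_TYPE_SS); otherwise it is
 * a data-stage BD (BD_TYPE_DS) whose direction follows bRequestType.
 */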

/* Setup the bd dma descriptor for a given request */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep = req->ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc);
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs / maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
			/* format of first bd for ep0 is different than other */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (!req->ep->dir)
			dword3 |= BD_ISP;

		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write the bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}
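
/*
 * Each BD is four little-endian dwords: offset[0]/offset[1] hold the
 * low/high halves of the buffer DMA address, offset[2] carries the buffer
 * length and interrupt target, and offset[3] holds the control flags
 * (start/end of transfer, BD type, direction, stop-BD-fetch).
 */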

/* Queue the xfr */
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	int num_bds, bd_available;
	struct bdc_ep *ep;
	int ret;

	ep = req->ep;
	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	num_bds = bd_needed_req(req);
	bd_available = bd_available_ep(ep);

	/* how many bd's are available on ep */
	if (num_bds > bd_available)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, num_bds);
	if (ret)
		return ret;
	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}

/* callback to gadget layer when xfr completes */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}
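
/*
 * bdc->lock is dropped around usb_gadget_giveback_request() because the
 * gadget driver's completion callback may call back into this driver
 * (for example to queue the next request) and would otherwise deadlock.
 */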

/* Disable the endpoint */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop, it can fail in
	 * disconnect scenarios, continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free");

	ep->flags = 0;
	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;
	ep->ep_type = 0;

	return 0;
}

/* Enable the endpoint */
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc;
	int ret = 0;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
				__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return ret;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	ep->flags |= BDC_EP_ENABLED;
	if (ep->ep_num == 1)
		return ret;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}

/* EP0 related code */

/* Queue a status stage BD */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;
	bdc_queue_xfr(bdc, status_req);

	return 0;
}

/* Queue xfr on ep0 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* if status stage was delayed? */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		}
		return 0;
	}
	/*
	 * if delayed status is false and a 0 length transfer is requested,
	 * i.e. for the status stage of some setup request, then just
	 * return from here; the status stage is queued independently
	 */
	if (req->usb_req.length == 0)
		return 0;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}

/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep, &bdc->ep0_req);
}

/* Queue req on ep */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret = 0;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;

	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}

/* Dequeue a request from ep */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64 deq_ptr_64 = 0;
	struct bdc *bdc;
	u32 tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;

	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
				__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
				ep, (void *)ep->usb_ep.desc);
	/* Stop the ep to see where the HW is ? */
	ret = bdc_stop_ep(bdc, ep->ep_num);
	/* if there is an issue with stopping ep, then no need to go further */
	if (ret)
		return 0;

	/*
	 * After endpoint is stopped, there can be 3 cases, the request
	 * is processed, pending or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0);
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1);
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
			end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If both start and end are processed, we cannot deq req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;
	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma = table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to a HW limitation we need to bypass chain bd's and issue
	 * ep_bla; in case start is pending and this is the first request in
	 * the list, then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_bd_dma
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}

/* Halt/Clear the ep based on value */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;
		dev_dbg(bdc->dev, "After Clear\n");
	}

	return ret;
}

/* Free all the ep */
void bdc_free_ep(struct bdc *bdc)
{
	struct bdc_ep *ep;
	u8 epnum;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (epnum = 1; epnum < bdc->num_eps; epnum++) {
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			continue;

		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);

		/* ep0 is not in this gadget list */
		if (epnum != 1)
			list_del(&ep->usb_ep.ep_list);

		kfree(ep);
	}
}

/* USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->test_mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		usb2_pm |= bdc->test_mode << 28;
		break;
	default:
		return -EINVAL;
	}
	dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}
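
/*
 * The selected test mode is written to the PTC field (bits 31:28) of the
 * USPPM2 register, which is why the selector taken from wIndex is shifted
 * left by 28 before the register write.
 */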

/*
 * Helper function to handle Transfer status report with status as either
 * success or short
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
					struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
	/* do not process this sr if ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);

	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * if sr_status is short and this transfer has more than 1 bd then it
	 * needs special handling; this is only applicable for bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is a multi bd xfr, lets see which bd
		 * caused the short transfer and how many bytes have been
		 * transferred so far.
		 */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi = bd_xfr->start_bdi;
		/*
		 * We know the start_bdi and short_bdi, how many xfr
		 * bds in between
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (!(bdi_to_tbi(ep, start_bdi) ==
						bdi_to_tbi(ep, short_bdi)))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		if (!(end_bdi == short_bdi))
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		/* length queued on the short bd */
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}
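
/*
 * For a short multi-BD transfer the actual length is therefore assembled
 * from two parts: the BDs before the short one, which all carried
 * BD_MAX_BUFF_SIZE bytes, plus the bytes of the short BD itself, i.e. its
 * programmed length minus the residue reported in the status report.
 */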

/* EP0 setup related packet handlers */

/*
 * Setup packet received, just store the packet and process on next DS or SS
 * started SR
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	/* Store received setup packet */
	setup_pkt = &bdc->setup_pkt;
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
	len = le16_to_cpu(setup_pkt->wLength);
	if (!len)
		bdc->ep0_state = WAIT_FOR_STATUS_START;
	else
		bdc->ep0_state = WAIT_FOR_DATA_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}

/* stall ep0 and dequeue any pending requests */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	struct bdc_req *req;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}

/* SET_ADD handlers */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	int ret = 0;
	u32 addr;

	addr = le16_to_cpu(ctrl->wValue);
	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue Address device command */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		if (addr)
			usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);

		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			"SET Address in wrong device state %d\n",
			state);
		ret = -EINVAL;
	}

	return ret;
}

/* Handler for SET/CLEAR FEATURE requests for device */
static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
					u16 wIndex, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 usppms = 0;

	dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
					__func__, set, state);
	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
		if (set)
			bdc->devstatus |= REMOTE_WAKE_ENABLE;
		else
			bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_DEVICE_TEST_MODE:
		dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
		if ((wIndex & 0xFF) ||
				(bdc->gadget.speed != USB_SPEED_HIGH) || !set)
			return -EINVAL;

		bdc->test_mode = wIndex >> 8;
		break;

	case USB_DEVICE_U1_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			/* clear previous u1t */
			usppms &= ~BDC_U1T(BDC_U1T_MASK);
			usppms |= BDC_U1T(U1_TIMEOUT);
			usppms |= BDC_U1E | BDC_PORT_W1S;
			bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
		} else {
			usppms &= ~BDC_U1E;
			usppms |= BDC_PORT_W1S;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_U2_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			usppms |= BDC_U2E;
			usppms |= BDC_U2A;
			bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
		} else {
			usppms &= ~BDC_U2E;
			usppms &= ~BDC_U2A;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_LTM_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;
		break;
	default:
		dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
		return -EOPNOTSUPP;
	} /* USB_RECIP_DEVICE end */

	return 0;
}

/* SET/CLEAR FEATURE handler */
static int ep0_handle_feature(struct bdc *bdc,
			      struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wValue;
	u16 wIndex;
	int epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);

	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);

	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;

		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when the SETUP
		 * packet was received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}
1303 static int ep0_handle_status(struct bdc
*bdc
,
1304 struct usb_ctrlrequest
*setup_pkt
)
1306 enum usb_device_state state
= bdc
->gadget
.state
;
1312 /* USB2.0 spec sec 9.4.5 */
1313 if (state
== USB_STATE_DEFAULT
)
1315 wIndex
= le16_to_cpu(setup_pkt
->wIndex
);
1316 dev_dbg(bdc
->dev
, "%s\n", __func__
);
1317 usb_status
= bdc
->devstatus
;
1318 switch (setup_pkt
->bRequestType
& USB_RECIP_MASK
) {
1319 case USB_RECIP_DEVICE
:
1321 "USB_RECIP_DEVICE devstatus:%08x\n",
1323 /* USB3 spec, sec 9.4.5 */
1324 if (bdc
->gadget
.speed
== USB_SPEED_SUPER
)
1325 usb_status
&= ~REMOTE_WAKE_ENABLE
;
1328 case USB_RECIP_INTERFACE
:
1329 dev_dbg(bdc
->dev
, "USB_RECIP_INTERFACE\n");
1330 if (bdc
->gadget
.speed
== USB_SPEED_SUPER
) {
1332 * This should come from func for Func remote wkup
1335 if (bdc
->devstatus
& REMOTE_WAKE_ENABLE
)
1336 usb_status
|= REMOTE_WAKE_ENABLE
;
1343 case USB_RECIP_ENDPOINT
:
1344 dev_dbg(bdc
->dev
, "USB_RECIP_ENDPOINT\n");
1345 epnum
= wIndex
& USB_ENDPOINT_NUMBER_MASK
;
1347 if ((wIndex
& USB_ENDPOINT_DIR_MASK
) == USB_DIR_IN
)
1348 epnum
= epnum
*2 + 1;
1352 epnum
= 1; /* EP0 */
1355 ep
= bdc
->bdc_ep_array
[epnum
];
1357 dev_err(bdc
->dev
, "ISSUE, GET_STATUS for invalid EP ?");
1360 if (ep
->flags
& BDC_EP_STALL
)
1361 usb_status
|= 1 << USB_ENDPOINT_HALT
;
1365 dev_err(bdc
->dev
, "Unknown recipient for get_status\n");
1368 /* prepare a data stage for GET_STATUS */
1369 dev_dbg(bdc
->dev
, "usb_status=%08x\n", usb_status
);
1370 *(__le16
*)bdc
->ep0_response_buff
= cpu_to_le16(usb_status
);
1371 bdc
->ep0_req
.usb_req
.length
= 2;
1372 bdc
->ep0_req
.usb_req
.buf
= &bdc
->ep0_response_buff
;
1373 ep0_queue_data_stage(bdc
);

static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
	/* ep0_set_sel_cmpl */
}

/* Queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
			struct usb_ctrlrequest *setup_pkt)
{
	struct bdc_ep *ep;
	u16 wLength;

	dev_dbg(bdc->dev, "%s\n", __func__);
	wLength = le16_to_cpu(setup_pkt->wLength);
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}

/*
 * Queue a 0 byte BD; needed when wLength is greater than the data length
 * and the length is a multiple of MaxPacket
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;
	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (ret) {
		dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
		return ret;
	}
	bdc->ep0_state = WAIT_FOR_DATA_XMIT;

	return 0;
}

/* Control request handler */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
							USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
				"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		delegate_setup = 1;
	}

	if (delegate_setup) {
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}

/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we receive ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}

/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if ZLP was queued? */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if data stage is in progress ? */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * For a 2 stage setup, only now process the setup; for a 3 stage
	 * setup the data stage is already handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}

/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		bdc->ep0_state = WAIT_FOR_SETUP;
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}

/* xfr completion status report handler */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
								__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
					__func__, sr_status, ep->name);

	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint");
			return;
		}
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
							bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength >Data Len,
			 * and Len is a multiple of MaxPacket, then queue ZLP
			 */
			if (bdc->zlp_needed) {
				/* queue 0 length bd */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}

static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete || !_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	if (!ep->usb_ep.desc) {
		dev_err(bdc->dev,
			"trying to queue req %p to disabled %s\n",
			_req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_err(bdc->dev,
			"req length > supported MAX:%d requested:%d\n",
			MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				  struct usb_request *_req)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->usb_req == _req)
			break;
	}
	if (&req->usb_req != _req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req !=req n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);

err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = ep_set_halt(ep, value);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						     gfp_t gfp_flags)
{
	struct bdc_req *req;
	struct bdc_ep *ep;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	ep = to_bdc_ep(_ep);
	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}

static void bdc_gadget_free_request(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct bdc_req *req;

	req = to_bdc_req(_req);
	kfree(req);
}

/* endpoint operations */

/* configure endpoint and also allocate resources */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				 const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("bdc_gadget_ep_enable invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver
	    || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
		return -ESHUTDOWN;
	}

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}
	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}
	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	if (!(ep->flags & BDC_EP_ENABLED)) {
		dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
		return 0;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};

/* dir = 1 is IN */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	if (dir)
		ep->usb_ep.caps.dir_in = true;
	else
		ep->usb_ep.caps.dir_out = true;

	/* ep->ep_num is the index inside bdc_ep */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->usb_ep.caps.type_control = true;
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.caps.type_iso = true;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		ep->usb_ep.max_streams = 0;
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}

/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
	u8 epnum;
	int ret;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	INIT_LIST_HEAD(&bdc->gadget.ep_list);
	/* init ep0 */
	ret = init_ep(bdc, 1, 0);
	if (ret) {
		dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
		return ret;
	}

	for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
		/* OUT */
		ret = init_ep(bdc, epnum, 0);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}

		/* IN */
		ret = init_ep(bdc, epnum, 1);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}
	}

	return 0;
}