Avoid beyond bounds copy while caching ACL
[zen-stable.git] / drivers / usb / gadget / langwell_udc.c
blobaca575a68109b74a760056be808407b59334aad6
1 /*
2 * Intel Langwell USB Device Controller driver
3 * Copyright (C) 2008-2009, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 */
11 /* #undef DEBUG */
12 /* #undef VERBOSE_DEBUG */
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/kernel.h>
18 #include <linux/delay.h>
19 #include <linux/ioport.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/timer.h>
25 #include <linux/list.h>
26 #include <linux/interrupt.h>
27 #include <linux/moduleparam.h>
28 #include <linux/device.h>
29 #include <linux/usb/ch9.h>
30 #include <linux/usb/gadget.h>
31 #include <linux/usb/otg.h>
32 #include <linux/pm.h>
33 #include <linux/io.h>
34 #include <linux/irq.h>
35 #include <asm/system.h>
36 #include <asm/unaligned.h>
38 #include "langwell_udc.h"
41 #define DRIVER_DESC "Intel Langwell USB Device Controller driver"
42 #define DRIVER_VERSION "16 May 2009"
44 static const char driver_name[] = "langwell_udc";
45 static const char driver_desc[] = DRIVER_DESC;
48 /* for endpoint 0 operations */
49 static const struct usb_endpoint_descriptor
50 langwell_ep0_desc = {
51 .bLength = USB_DT_ENDPOINT_SIZE,
52 .bDescriptorType = USB_DT_ENDPOINT,
53 .bEndpointAddress = 0,
54 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
55 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
59 /*-------------------------------------------------------------------------*/
60 /* debugging */
62 #ifdef VERBOSE_DEBUG
63 static inline void print_all_registers(struct langwell_udc *dev)
65 int i;
67 /* Capability Registers */
68 dev_dbg(&dev->pdev->dev,
69 "Capability Registers (offset: 0x%04x, length: 0x%08x)\n",
70 CAP_REG_OFFSET, (u32)sizeof(struct langwell_cap_regs));
71 dev_dbg(&dev->pdev->dev, "caplength=0x%02x\n",
72 readb(&dev->cap_regs->caplength));
73 dev_dbg(&dev->pdev->dev, "hciversion=0x%04x\n",
74 readw(&dev->cap_regs->hciversion));
75 dev_dbg(&dev->pdev->dev, "hcsparams=0x%08x\n",
76 readl(&dev->cap_regs->hcsparams));
77 dev_dbg(&dev->pdev->dev, "hccparams=0x%08x\n",
78 readl(&dev->cap_regs->hccparams));
79 dev_dbg(&dev->pdev->dev, "dciversion=0x%04x\n",
80 readw(&dev->cap_regs->dciversion));
81 dev_dbg(&dev->pdev->dev, "dccparams=0x%08x\n",
82 readl(&dev->cap_regs->dccparams));
84 /* Operational Registers */
85 dev_dbg(&dev->pdev->dev,
86 "Operational Registers (offset: 0x%04x, length: 0x%08x)\n",
87 OP_REG_OFFSET, (u32)sizeof(struct langwell_op_regs));
88 dev_dbg(&dev->pdev->dev, "extsts=0x%08x\n",
89 readl(&dev->op_regs->extsts));
90 dev_dbg(&dev->pdev->dev, "extintr=0x%08x\n",
91 readl(&dev->op_regs->extintr));
92 dev_dbg(&dev->pdev->dev, "usbcmd=0x%08x\n",
93 readl(&dev->op_regs->usbcmd));
94 dev_dbg(&dev->pdev->dev, "usbsts=0x%08x\n",
95 readl(&dev->op_regs->usbsts));
96 dev_dbg(&dev->pdev->dev, "usbintr=0x%08x\n",
97 readl(&dev->op_regs->usbintr));
98 dev_dbg(&dev->pdev->dev, "frindex=0x%08x\n",
99 readl(&dev->op_regs->frindex));
100 dev_dbg(&dev->pdev->dev, "ctrldssegment=0x%08x\n",
101 readl(&dev->op_regs->ctrldssegment));
102 dev_dbg(&dev->pdev->dev, "deviceaddr=0x%08x\n",
103 readl(&dev->op_regs->deviceaddr));
104 dev_dbg(&dev->pdev->dev, "endpointlistaddr=0x%08x\n",
105 readl(&dev->op_regs->endpointlistaddr));
106 dev_dbg(&dev->pdev->dev, "ttctrl=0x%08x\n",
107 readl(&dev->op_regs->ttctrl));
108 dev_dbg(&dev->pdev->dev, "burstsize=0x%08x\n",
109 readl(&dev->op_regs->burstsize));
110 dev_dbg(&dev->pdev->dev, "txfilltuning=0x%08x\n",
111 readl(&dev->op_regs->txfilltuning));
112 dev_dbg(&dev->pdev->dev, "txttfilltuning=0x%08x\n",
113 readl(&dev->op_regs->txttfilltuning));
114 dev_dbg(&dev->pdev->dev, "ic_usb=0x%08x\n",
115 readl(&dev->op_regs->ic_usb));
116 dev_dbg(&dev->pdev->dev, "ulpi_viewport=0x%08x\n",
117 readl(&dev->op_regs->ulpi_viewport));
118 dev_dbg(&dev->pdev->dev, "configflag=0x%08x\n",
119 readl(&dev->op_regs->configflag));
120 dev_dbg(&dev->pdev->dev, "portsc1=0x%08x\n",
121 readl(&dev->op_regs->portsc1));
122 dev_dbg(&dev->pdev->dev, "devlc=0x%08x\n",
123 readl(&dev->op_regs->devlc));
124 dev_dbg(&dev->pdev->dev, "otgsc=0x%08x\n",
125 readl(&dev->op_regs->otgsc));
126 dev_dbg(&dev->pdev->dev, "usbmode=0x%08x\n",
127 readl(&dev->op_regs->usbmode));
128 dev_dbg(&dev->pdev->dev, "endptnak=0x%08x\n",
129 readl(&dev->op_regs->endptnak));
130 dev_dbg(&dev->pdev->dev, "endptnaken=0x%08x\n",
131 readl(&dev->op_regs->endptnaken));
132 dev_dbg(&dev->pdev->dev, "endptsetupstat=0x%08x\n",
133 readl(&dev->op_regs->endptsetupstat));
134 dev_dbg(&dev->pdev->dev, "endptprime=0x%08x\n",
135 readl(&dev->op_regs->endptprime));
136 dev_dbg(&dev->pdev->dev, "endptflush=0x%08x\n",
137 readl(&dev->op_regs->endptflush));
138 dev_dbg(&dev->pdev->dev, "endptstat=0x%08x\n",
139 readl(&dev->op_regs->endptstat));
140 dev_dbg(&dev->pdev->dev, "endptcomplete=0x%08x\n",
141 readl(&dev->op_regs->endptcomplete));
143 for (i = 0; i < dev->ep_max / 2; i++) {
144 dev_dbg(&dev->pdev->dev, "endptctrl[%d]=0x%08x\n",
145 i, readl(&dev->op_regs->endptctrl[i]));
148 #else
150 #define print_all_registers(dev) do { } while (0)
152 #endif /* VERBOSE_DEBUG */
155 /*-------------------------------------------------------------------------*/
157 #define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
158 USB_DIR_IN) : (usb_endpoint_dir_in((ep)->desc)))
160 #define DIR_STRING(ep) (is_in(ep) ? "in" : "out")
163 static char *type_string(const struct usb_endpoint_descriptor *desc)
165 switch (usb_endpoint_type(desc)) {
166 case USB_ENDPOINT_XFER_BULK:
167 return "bulk";
168 case USB_ENDPOINT_XFER_ISOC:
169 return "iso";
170 case USB_ENDPOINT_XFER_INT:
171 return "int";
174 return "control";
178 /* configure endpoint control registers */
179 static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
180 unsigned char is_in, unsigned char ep_type)
182 struct langwell_udc *dev;
183 u32 endptctrl;
185 dev = ep->dev;
186 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
188 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
189 if (is_in) { /* TX */
190 if (ep_num)
191 endptctrl |= EPCTRL_TXR;
192 endptctrl |= EPCTRL_TXE;
193 endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
194 } else { /* RX */
195 if (ep_num)
196 endptctrl |= EPCTRL_RXR;
197 endptctrl |= EPCTRL_RXE;
198 endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
201 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
203 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
207 /* reset ep0 dQH and endptctrl */
208 static void ep0_reset(struct langwell_udc *dev)
210 struct langwell_ep *ep;
211 int i;
213 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
215 /* ep0 in and out */
216 for (i = 0; i < 2; i++) {
217 ep = &dev->ep[i];
218 ep->dev = dev;
220 /* ep0 dQH */
221 ep->dqh = &dev->ep_dqh[i];
223 /* configure ep0 endpoint capabilities in dQH */
224 ep->dqh->dqh_ios = 1;
225 ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
227 /* enable ep0-in HW zero length termination select */
228 if (is_in(ep))
229 ep->dqh->dqh_zlt = 0;
230 ep->dqh->dqh_mult = 0;
232 ep->dqh->dtd_next = DTD_TERM;
234 /* configure ep0 control registers */
235 ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
238 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
242 /*-------------------------------------------------------------------------*/
244 /* endpoints operations */
246 /* configure endpoint, making it usable */
247 static int langwell_ep_enable(struct usb_ep *_ep,
248 const struct usb_endpoint_descriptor *desc)
250 struct langwell_udc *dev;
251 struct langwell_ep *ep;
252 u16 max = 0;
253 unsigned long flags;
254 int i, retval = 0;
255 unsigned char zlt, ios = 0, mult = 0;
257 ep = container_of(_ep, struct langwell_ep, ep);
258 dev = ep->dev;
259 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
261 if (!_ep || !desc || ep->desc
262 || desc->bDescriptorType != USB_DT_ENDPOINT)
263 return -EINVAL;
265 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
266 return -ESHUTDOWN;
268 max = usb_endpoint_maxp(desc);
271 * disable HW zero length termination select
272 * driver handles zero length packet through req->req.zero
274 zlt = 1;
277 * sanity check type, direction, address, and then
278 * initialize the endpoint capabilities fields in dQH
280 switch (usb_endpoint_type(desc)) {
281 case USB_ENDPOINT_XFER_CONTROL:
282 ios = 1;
283 break;
284 case USB_ENDPOINT_XFER_BULK:
285 if ((dev->gadget.speed == USB_SPEED_HIGH
286 && max != 512)
287 || (dev->gadget.speed == USB_SPEED_FULL
288 && max > 64)) {
289 goto done;
291 break;
292 case USB_ENDPOINT_XFER_INT:
293 if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
294 goto done;
296 switch (dev->gadget.speed) {
297 case USB_SPEED_HIGH:
298 if (max <= 1024)
299 break;
300 case USB_SPEED_FULL:
301 if (max <= 64)
302 break;
303 default:
304 if (max <= 8)
305 break;
306 goto done;
308 break;
309 case USB_ENDPOINT_XFER_ISOC:
310 if (strstr(ep->ep.name, "-bulk")
311 || strstr(ep->ep.name, "-int"))
312 goto done;
314 switch (dev->gadget.speed) {
315 case USB_SPEED_HIGH:
316 if (max <= 1024)
317 break;
318 case USB_SPEED_FULL:
319 if (max <= 1023)
320 break;
321 default:
322 goto done;
325 * FIXME:
326 * calculate transactions needed for high bandwidth iso
328 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
329 max = max & 0x8ff; /* bit 0~10 */
330 /* 3 transactions at most */
331 if (mult > 3)
332 goto done;
333 break;
334 default:
335 goto done;
338 spin_lock_irqsave(&dev->lock, flags);
340 ep->ep.maxpacket = max;
341 ep->desc = desc;
342 ep->stopped = 0;
343 ep->ep_num = usb_endpoint_num(desc);
345 /* ep_type */
346 ep->ep_type = usb_endpoint_type(desc);
348 /* configure endpoint control registers */
349 ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
351 /* configure endpoint capabilities in dQH */
352 i = ep->ep_num * 2 + is_in(ep);
353 ep->dqh = &dev->ep_dqh[i];
354 ep->dqh->dqh_ios = ios;
355 ep->dqh->dqh_mpl = cpu_to_le16(max);
356 ep->dqh->dqh_zlt = zlt;
357 ep->dqh->dqh_mult = mult;
358 ep->dqh->dtd_next = DTD_TERM;
360 dev_dbg(&dev->pdev->dev, "enabled %s (ep%d%s-%s), max %04x\n",
361 _ep->name,
362 ep->ep_num,
363 DIR_STRING(ep),
364 type_string(desc),
365 max);
367 spin_unlock_irqrestore(&dev->lock, flags);
368 done:
369 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
370 return retval;
374 /*-------------------------------------------------------------------------*/
376 /* retire a request */
377 static void done(struct langwell_ep *ep, struct langwell_request *req,
378 int status)
380 struct langwell_udc *dev = ep->dev;
381 unsigned stopped = ep->stopped;
382 struct langwell_dtd *curr_dtd, *next_dtd;
383 int i;
385 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
387 /* remove the req from ep->queue */
388 list_del_init(&req->queue);
390 if (req->req.status == -EINPROGRESS)
391 req->req.status = status;
392 else
393 status = req->req.status;
395 /* free dTD for the request */
396 next_dtd = req->head;
397 for (i = 0; i < req->dtd_count; i++) {
398 curr_dtd = next_dtd;
399 if (i != req->dtd_count - 1)
400 next_dtd = curr_dtd->next_dtd_virt;
401 dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
404 if (req->mapped) {
405 dma_unmap_single(&dev->pdev->dev,
406 req->req.dma, req->req.length,
407 is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
408 req->req.dma = DMA_ADDR_INVALID;
409 req->mapped = 0;
410 } else
411 dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
412 req->req.length,
413 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
415 if (status != -ESHUTDOWN)
416 dev_dbg(&dev->pdev->dev,
417 "complete %s, req %p, stat %d, len %u/%u\n",
418 ep->ep.name, &req->req, status,
419 req->req.actual, req->req.length);
421 /* don't modify queue heads during completion callback */
422 ep->stopped = 1;
424 spin_unlock(&dev->lock);
425 /* complete routine from gadget driver */
426 if (req->req.complete)
427 req->req.complete(&ep->ep, &req->req);
429 spin_lock(&dev->lock);
430 ep->stopped = stopped;
432 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
436 static void langwell_ep_fifo_flush(struct usb_ep *_ep);
438 /* delete all endpoint requests, called with spinlock held */
439 static void nuke(struct langwell_ep *ep, int status)
441 /* called with spinlock held */
442 ep->stopped = 1;
444 /* endpoint fifo flush */
445 if (&ep->ep && ep->desc)
446 langwell_ep_fifo_flush(&ep->ep);
448 while (!list_empty(&ep->queue)) {
449 struct langwell_request *req = NULL;
450 req = list_entry(ep->queue.next, struct langwell_request,
451 queue);
452 done(ep, req, status);
457 /*-------------------------------------------------------------------------*/
459 /* endpoint is no longer usable */
460 static int langwell_ep_disable(struct usb_ep *_ep)
462 struct langwell_ep *ep;
463 unsigned long flags;
464 struct langwell_udc *dev;
465 int ep_num;
466 u32 endptctrl;
468 ep = container_of(_ep, struct langwell_ep, ep);
469 dev = ep->dev;
470 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
472 if (!_ep || !ep->desc)
473 return -EINVAL;
475 spin_lock_irqsave(&dev->lock, flags);
477 /* disable endpoint control register */
478 ep_num = ep->ep_num;
479 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
480 if (is_in(ep))
481 endptctrl &= ~EPCTRL_TXE;
482 else
483 endptctrl &= ~EPCTRL_RXE;
484 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
486 /* nuke all pending requests (does flush) */
487 nuke(ep, -ESHUTDOWN);
489 ep->desc = NULL;
490 ep->ep.desc = NULL;
491 ep->stopped = 1;
493 spin_unlock_irqrestore(&dev->lock, flags);
495 dev_dbg(&dev->pdev->dev, "disabled %s\n", _ep->name);
496 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
498 return 0;
502 /* allocate a request object to use with this endpoint */
503 static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
504 gfp_t gfp_flags)
506 struct langwell_ep *ep;
507 struct langwell_udc *dev;
508 struct langwell_request *req = NULL;
510 if (!_ep)
511 return NULL;
513 ep = container_of(_ep, struct langwell_ep, ep);
514 dev = ep->dev;
515 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
517 req = kzalloc(sizeof(*req), gfp_flags);
518 if (!req)
519 return NULL;
521 req->req.dma = DMA_ADDR_INVALID;
522 INIT_LIST_HEAD(&req->queue);
524 dev_vdbg(&dev->pdev->dev, "alloc request for %s\n", _ep->name);
525 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
526 return &req->req;
530 /* free a request object */
531 static void langwell_free_request(struct usb_ep *_ep,
532 struct usb_request *_req)
534 struct langwell_ep *ep;
535 struct langwell_udc *dev;
536 struct langwell_request *req = NULL;
538 ep = container_of(_ep, struct langwell_ep, ep);
539 dev = ep->dev;
540 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
542 if (!_ep || !_req)
543 return;
545 req = container_of(_req, struct langwell_request, req);
546 WARN_ON(!list_empty(&req->queue));
548 if (_req)
549 kfree(req);
551 dev_vdbg(&dev->pdev->dev, "free request for %s\n", _ep->name);
552 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
556 /*-------------------------------------------------------------------------*/
558 /* queue dTD and PRIME endpoint */
559 static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
561 u32 bit_mask, usbcmd, endptstat, dtd_dma;
562 u8 dtd_status;
563 int i;
564 struct langwell_dqh *dqh;
565 struct langwell_udc *dev;
567 dev = ep->dev;
568 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
570 i = ep->ep_num * 2 + is_in(ep);
571 dqh = &dev->ep_dqh[i];
573 if (ep->ep_num)
574 dev_vdbg(&dev->pdev->dev, "%s\n", ep->name);
575 else
576 /* ep0 */
577 dev_vdbg(&dev->pdev->dev, "%s-%s\n", ep->name, DIR_STRING(ep));
579 dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%p\n",
580 i, &(dev->ep_dqh[i]));
582 bit_mask = is_in(ep) ?
583 (1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
585 dev_vdbg(&dev->pdev->dev, "bit_mask = 0x%08x\n", bit_mask);
587 /* check if the pipe is empty */
588 if (!(list_empty(&ep->queue))) {
589 /* add dTD to the end of linked list */
590 struct langwell_request *lastreq;
591 lastreq = list_entry(ep->queue.prev,
592 struct langwell_request, queue);
594 lastreq->tail->dtd_next =
595 cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
597 /* read prime bit, if 1 goto out */
598 if (readl(&dev->op_regs->endptprime) & bit_mask)
599 goto out;
601 do {
602 /* set ATDTW bit in USBCMD */
603 usbcmd = readl(&dev->op_regs->usbcmd);
604 writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
606 /* read correct status bit */
607 endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
609 } while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
611 /* write ATDTW bit to 0 */
612 usbcmd = readl(&dev->op_regs->usbcmd);
613 writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
615 if (endptstat)
616 goto out;
619 /* write dQH next pointer and terminate bit to 0 */
620 dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
621 dqh->dtd_next = cpu_to_le32(dtd_dma);
623 /* clear active and halt bit */
624 dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
625 dqh->dtd_status &= dtd_status;
626 dev_vdbg(&dev->pdev->dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
628 /* ensure that updates to the dQH will occur before priming */
629 wmb();
631 /* write 1 to endptprime register to PRIME endpoint */
632 bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
633 dev_vdbg(&dev->pdev->dev, "endprime bit_mask = 0x%08x\n", bit_mask);
634 writel(bit_mask, &dev->op_regs->endptprime);
635 out:
636 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
637 return 0;
641 /* fill in the dTD structure to build a transfer descriptor */
642 static struct langwell_dtd *build_dtd(struct langwell_request *req,
643 unsigned *length, dma_addr_t *dma, int *is_last)
645 u32 buf_ptr;
646 struct langwell_dtd *dtd;
647 struct langwell_udc *dev;
648 int i;
650 dev = req->ep->dev;
651 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
653 /* the maximum transfer length, up to 16k bytes */
654 *length = min(req->req.length - req->req.actual,
655 (unsigned)DTD_MAX_TRANSFER_LENGTH);
657 /* create dTD dma_pool resource */
658 dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
659 if (dtd == NULL)
660 return dtd;
661 dtd->dtd_dma = *dma;
663 /* initialize buffer page pointers */
664 buf_ptr = (u32)(req->req.dma + req->req.actual);
665 for (i = 0; i < 5; i++)
666 dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
668 req->req.actual += *length;
670 /* fill in total bytes with transfer size */
671 dtd->dtd_total = cpu_to_le16(*length);
672 dev_vdbg(&dev->pdev->dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
674 /* set is_last flag if req->req.zero is set or not */
675 if (req->req.zero) {
676 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
677 *is_last = 1;
678 else
679 *is_last = 0;
680 } else if (req->req.length == req->req.actual) {
681 *is_last = 1;
682 } else
683 *is_last = 0;
685 if (*is_last == 0)
686 dev_vdbg(&dev->pdev->dev, "multi-dtd request!\n");
688 /* set interrupt on complete bit for the last dTD */
689 if (*is_last && !req->req.no_interrupt)
690 dtd->dtd_ioc = 1;
692 /* set multiplier override 0 for non-ISO and non-TX endpoint */
693 dtd->dtd_multo = 0;
695 /* set the active bit of status field to 1 */
696 dtd->dtd_status = DTD_STS_ACTIVE;
697 dev_vdbg(&dev->pdev->dev, "dtd->dtd_status = 0x%02x\n",
698 dtd->dtd_status);
700 dev_vdbg(&dev->pdev->dev, "length = %d, dma addr= 0x%08x\n",
701 *length, (int)*dma);
702 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
703 return dtd;
707 /* generate dTD linked list for a request */
708 static int req_to_dtd(struct langwell_request *req)
710 unsigned count;
711 int is_last, is_first = 1;
712 struct langwell_dtd *dtd, *last_dtd = NULL;
713 struct langwell_udc *dev;
714 dma_addr_t dma;
716 dev = req->ep->dev;
717 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
718 do {
719 dtd = build_dtd(req, &count, &dma, &is_last);
720 if (dtd == NULL)
721 return -ENOMEM;
723 if (is_first) {
724 is_first = 0;
725 req->head = dtd;
726 } else {
727 last_dtd->dtd_next = cpu_to_le32(dma);
728 last_dtd->next_dtd_virt = dtd;
730 last_dtd = dtd;
731 req->dtd_count++;
732 } while (!is_last);
734 /* set terminate bit to 1 for the last dTD */
735 dtd->dtd_next = DTD_TERM;
737 req->tail = dtd;
739 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
740 return 0;
743 /*-------------------------------------------------------------------------*/
745 /* queue (submits) an I/O requests to an endpoint */
746 static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
747 gfp_t gfp_flags)
749 struct langwell_request *req;
750 struct langwell_ep *ep;
751 struct langwell_udc *dev;
752 unsigned long flags;
753 int is_iso = 0, zlflag = 0;
755 /* always require a cpu-view buffer */
756 req = container_of(_req, struct langwell_request, req);
757 ep = container_of(_ep, struct langwell_ep, ep);
759 if (!_req || !_req->complete || !_req->buf
760 || !list_empty(&req->queue)) {
761 return -EINVAL;
764 if (unlikely(!_ep || !ep->desc))
765 return -EINVAL;
767 dev = ep->dev;
768 req->ep = ep;
769 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
771 if (usb_endpoint_xfer_isoc(ep->desc)) {
772 if (req->req.length > ep->ep.maxpacket)
773 return -EMSGSIZE;
774 is_iso = 1;
777 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
778 return -ESHUTDOWN;
780 /* set up dma mapping in case the caller didn't */
781 if (_req->dma == DMA_ADDR_INVALID) {
782 /* WORKAROUND: WARN_ON(size == 0) */
783 if (_req->length == 0) {
784 dev_vdbg(&dev->pdev->dev, "req->length: 0->1\n");
785 zlflag = 1;
786 _req->length++;
789 _req->dma = dma_map_single(&dev->pdev->dev,
790 _req->buf, _req->length,
791 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
792 if (zlflag && (_req->length == 1)) {
793 dev_vdbg(&dev->pdev->dev, "req->length: 1->0\n");
794 zlflag = 0;
795 _req->length = 0;
798 req->mapped = 1;
799 dev_vdbg(&dev->pdev->dev, "req->mapped = 1\n");
800 } else {
801 dma_sync_single_for_device(&dev->pdev->dev,
802 _req->dma, _req->length,
803 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
804 req->mapped = 0;
805 dev_vdbg(&dev->pdev->dev, "req->mapped = 0\n");
808 dev_dbg(&dev->pdev->dev,
809 "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
810 _ep->name,
811 _req, _req->length, _req->buf, (int)_req->dma);
813 _req->status = -EINPROGRESS;
814 _req->actual = 0;
815 req->dtd_count = 0;
817 spin_lock_irqsave(&dev->lock, flags);
819 /* build and put dTDs to endpoint queue */
820 if (!req_to_dtd(req)) {
821 queue_dtd(ep, req);
822 } else {
823 spin_unlock_irqrestore(&dev->lock, flags);
824 return -ENOMEM;
827 /* update ep0 state */
828 if (ep->ep_num == 0)
829 dev->ep0_state = DATA_STATE_XMIT;
831 if (likely(req != NULL)) {
832 list_add_tail(&req->queue, &ep->queue);
833 dev_vdbg(&dev->pdev->dev, "list_add_tail()\n");
836 spin_unlock_irqrestore(&dev->lock, flags);
838 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
839 return 0;
843 /* dequeue (cancels, unlinks) an I/O request from an endpoint */
844 static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
846 struct langwell_ep *ep;
847 struct langwell_udc *dev;
848 struct langwell_request *req;
849 unsigned long flags;
850 int stopped, ep_num, retval = 0;
851 u32 endptctrl;
853 ep = container_of(_ep, struct langwell_ep, ep);
854 dev = ep->dev;
855 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
857 if (!_ep || !ep->desc || !_req)
858 return -EINVAL;
860 if (!dev->driver)
861 return -ESHUTDOWN;
863 spin_lock_irqsave(&dev->lock, flags);
864 stopped = ep->stopped;
866 /* quiesce dma while we patch the queue */
867 ep->stopped = 1;
868 ep_num = ep->ep_num;
870 /* disable endpoint control register */
871 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
872 if (is_in(ep))
873 endptctrl &= ~EPCTRL_TXE;
874 else
875 endptctrl &= ~EPCTRL_RXE;
876 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
878 /* make sure it's still queued on this endpoint */
879 list_for_each_entry(req, &ep->queue, queue) {
880 if (&req->req == _req)
881 break;
884 if (&req->req != _req) {
885 retval = -EINVAL;
886 goto done;
889 /* queue head may be partially complete. */
890 if (ep->queue.next == &req->queue) {
891 dev_dbg(&dev->pdev->dev, "unlink (%s) dma\n", _ep->name);
892 _req->status = -ECONNRESET;
893 langwell_ep_fifo_flush(&ep->ep);
895 /* not the last request in endpoint queue */
896 if (likely(ep->queue.next == &req->queue)) {
897 struct langwell_dqh *dqh;
898 struct langwell_request *next_req;
900 dqh = ep->dqh;
901 next_req = list_entry(req->queue.next,
902 struct langwell_request, queue);
904 /* point the dQH to the first dTD of next request */
905 writel((u32) next_req->head, &dqh->dqh_current);
907 } else {
908 struct langwell_request *prev_req;
910 prev_req = list_entry(req->queue.prev,
911 struct langwell_request, queue);
912 writel(readl(&req->tail->dtd_next),
913 &prev_req->tail->dtd_next);
916 done(ep, req, -ECONNRESET);
918 done:
919 /* enable endpoint again */
920 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
921 if (is_in(ep))
922 endptctrl |= EPCTRL_TXE;
923 else
924 endptctrl |= EPCTRL_RXE;
925 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
927 ep->stopped = stopped;
928 spin_unlock_irqrestore(&dev->lock, flags);
930 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
931 return retval;
935 /*-------------------------------------------------------------------------*/
937 /* endpoint set/clear halt */
938 static void ep_set_halt(struct langwell_ep *ep, int value)
940 u32 endptctrl = 0;
941 int ep_num;
942 struct langwell_udc *dev = ep->dev;
943 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
945 ep_num = ep->ep_num;
946 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
948 /* value: 1 - set halt, 0 - clear halt */
949 if (value) {
950 /* set the stall bit */
951 if (is_in(ep))
952 endptctrl |= EPCTRL_TXS;
953 else
954 endptctrl |= EPCTRL_RXS;
955 } else {
956 /* clear the stall bit and reset data toggle */
957 if (is_in(ep)) {
958 endptctrl &= ~EPCTRL_TXS;
959 endptctrl |= EPCTRL_TXR;
960 } else {
961 endptctrl &= ~EPCTRL_RXS;
962 endptctrl |= EPCTRL_RXR;
966 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
968 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
972 /* set the endpoint halt feature */
973 static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
975 struct langwell_ep *ep;
976 struct langwell_udc *dev;
977 unsigned long flags;
978 int retval = 0;
980 ep = container_of(_ep, struct langwell_ep, ep);
981 dev = ep->dev;
983 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
985 if (!_ep || !ep->desc)
986 return -EINVAL;
988 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
989 return -ESHUTDOWN;
991 if (usb_endpoint_xfer_isoc(ep->desc))
992 return -EOPNOTSUPP;
994 spin_lock_irqsave(&dev->lock, flags);
997 * attempt to halt IN ep will fail if any transfer requests
998 * are still queue
1000 if (!list_empty(&ep->queue) && is_in(ep) && value) {
1001 /* IN endpoint FIFO holds bytes */
1002 dev_dbg(&dev->pdev->dev, "%s FIFO holds bytes\n", _ep->name);
1003 retval = -EAGAIN;
1004 goto done;
1007 /* endpoint set/clear halt */
1008 if (ep->ep_num) {
1009 ep_set_halt(ep, value);
1010 } else { /* endpoint 0 */
1011 dev->ep0_state = WAIT_FOR_SETUP;
1012 dev->ep0_dir = USB_DIR_OUT;
1014 done:
1015 spin_unlock_irqrestore(&dev->lock, flags);
1016 dev_dbg(&dev->pdev->dev, "%s %s halt\n",
1017 _ep->name, value ? "set" : "clear");
1018 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1019 return retval;
1023 /* set the halt feature and ignores clear requests */
1024 static int langwell_ep_set_wedge(struct usb_ep *_ep)
1026 struct langwell_ep *ep;
1027 struct langwell_udc *dev;
1029 ep = container_of(_ep, struct langwell_ep, ep);
1030 dev = ep->dev;
1032 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1034 if (!_ep || !ep->desc)
1035 return -EINVAL;
1037 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1038 return usb_ep_set_halt(_ep);
1042 /* flush contents of a fifo */
1043 static void langwell_ep_fifo_flush(struct usb_ep *_ep)
1045 struct langwell_ep *ep;
1046 struct langwell_udc *dev;
1047 u32 flush_bit;
1048 unsigned long timeout;
1050 ep = container_of(_ep, struct langwell_ep, ep);
1051 dev = ep->dev;
1053 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1055 if (!_ep || !ep->desc) {
1056 dev_vdbg(&dev->pdev->dev, "ep or ep->desc is NULL\n");
1057 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1058 return;
1061 dev_vdbg(&dev->pdev->dev, "%s-%s fifo flush\n",
1062 _ep->name, DIR_STRING(ep));
1064 /* flush endpoint buffer */
1065 if (ep->ep_num == 0)
1066 flush_bit = (1 << 16) | 1;
1067 else if (is_in(ep))
1068 flush_bit = 1 << (ep->ep_num + 16); /* TX */
1069 else
1070 flush_bit = 1 << ep->ep_num; /* RX */
1072 /* wait until flush complete */
1073 timeout = jiffies + FLUSH_TIMEOUT;
1074 do {
1075 writel(flush_bit, &dev->op_regs->endptflush);
1076 while (readl(&dev->op_regs->endptflush)) {
1077 if (time_after(jiffies, timeout)) {
1078 dev_err(&dev->pdev->dev, "ep flush timeout\n");
1079 goto done;
1081 cpu_relax();
1083 } while (readl(&dev->op_regs->endptstat) & flush_bit);
1084 done:
1085 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1089 /* endpoints operations structure */
1090 static const struct usb_ep_ops langwell_ep_ops = {
1092 /* configure endpoint, making it usable */
1093 .enable = langwell_ep_enable,
1095 /* endpoint is no longer usable */
1096 .disable = langwell_ep_disable,
1098 /* allocate a request object to use with this endpoint */
1099 .alloc_request = langwell_alloc_request,
1101 /* free a request object */
1102 .free_request = langwell_free_request,
1104 /* queue (submits) an I/O requests to an endpoint */
1105 .queue = langwell_ep_queue,
1107 /* dequeue (cancels, unlinks) an I/O request from an endpoint */
1108 .dequeue = langwell_ep_dequeue,
1110 /* set the endpoint halt feature */
1111 .set_halt = langwell_ep_set_halt,
1113 /* set the halt feature and ignores clear requests */
1114 .set_wedge = langwell_ep_set_wedge,
1116 /* flush contents of a fifo */
1117 .fifo_flush = langwell_ep_fifo_flush,
1121 /*-------------------------------------------------------------------------*/
1123 /* device controller usb_gadget_ops structure */
1125 /* returns the current frame number */
1126 static int langwell_get_frame(struct usb_gadget *_gadget)
1128 struct langwell_udc *dev;
1129 u16 retval;
1131 if (!_gadget)
1132 return -ENODEV;
1134 dev = container_of(_gadget, struct langwell_udc, gadget);
1135 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1137 retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
1139 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1140 return retval;
1144 /* enter or exit PHY low power state */
1145 static void langwell_phy_low_power(struct langwell_udc *dev, bool flag)
1147 u32 devlc;
1148 u8 devlc_byte2;
1149 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1151 devlc = readl(&dev->op_regs->devlc);
1152 dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1154 if (flag)
1155 devlc |= LPM_PHCD;
1156 else
1157 devlc &= ~LPM_PHCD;
1159 /* FIXME: workaround for Langwell A1/A2/A3 sighting */
1160 devlc_byte2 = (devlc >> 16) & 0xff;
1161 writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
1163 devlc = readl(&dev->op_regs->devlc);
1164 dev_vdbg(&dev->pdev->dev,
1165 "%s PHY low power suspend, devlc = 0x%08x\n",
1166 flag ? "enter" : "exit", devlc);
1170 /* tries to wake up the host connected to this gadget */
1171 static int langwell_wakeup(struct usb_gadget *_gadget)
1173 struct langwell_udc *dev;
1174 u32 portsc1;
1175 unsigned long flags;
1177 if (!_gadget)
1178 return 0;
1180 dev = container_of(_gadget, struct langwell_udc, gadget);
1181 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1183 /* remote wakeup feature not enabled by host */
1184 if (!dev->remote_wakeup) {
1185 dev_info(&dev->pdev->dev, "remote wakeup is disabled\n");
1186 return -ENOTSUPP;
1189 spin_lock_irqsave(&dev->lock, flags);
1191 portsc1 = readl(&dev->op_regs->portsc1);
1192 if (!(portsc1 & PORTS_SUSP)) {
1193 spin_unlock_irqrestore(&dev->lock, flags);
1194 return 0;
1197 /* LPM L1 to L0 or legacy remote wakeup */
1198 if (dev->lpm && dev->lpm_state == LPM_L1)
1199 dev_info(&dev->pdev->dev, "LPM L1 to L0 remote wakeup\n");
1200 else
1201 dev_info(&dev->pdev->dev, "device remote wakeup\n");
1203 /* exit PHY low power suspend */
1204 if (dev->pdev->device != 0x0829)
1205 langwell_phy_low_power(dev, 0);
1207 /* force port resume */
1208 portsc1 |= PORTS_FPR;
1209 writel(portsc1, &dev->op_regs->portsc1);
1211 spin_unlock_irqrestore(&dev->lock, flags);
1213 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1214 return 0;
1218 /* notify controller that VBUS is powered or not */
1219 static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
1221 struct langwell_udc *dev;
1222 unsigned long flags;
1223 u32 usbcmd;
1225 if (!_gadget)
1226 return -ENODEV;
1228 dev = container_of(_gadget, struct langwell_udc, gadget);
1229 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1231 spin_lock_irqsave(&dev->lock, flags);
1232 dev_vdbg(&dev->pdev->dev, "VBUS status: %s\n",
1233 is_active ? "on" : "off");
1235 dev->vbus_active = (is_active != 0);
1236 if (dev->driver && dev->softconnected && dev->vbus_active) {
1237 usbcmd = readl(&dev->op_regs->usbcmd);
1238 usbcmd |= CMD_RUNSTOP;
1239 writel(usbcmd, &dev->op_regs->usbcmd);
1240 } else {
1241 usbcmd = readl(&dev->op_regs->usbcmd);
1242 usbcmd &= ~CMD_RUNSTOP;
1243 writel(usbcmd, &dev->op_regs->usbcmd);
1246 spin_unlock_irqrestore(&dev->lock, flags);
1248 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1249 return 0;
1253 /* constrain controller's VBUS power usage */
1254 static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1256 struct langwell_udc *dev;
1258 if (!_gadget)
1259 return -ENODEV;
1261 dev = container_of(_gadget, struct langwell_udc, gadget);
1262 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1264 if (dev->transceiver) {
1265 dev_vdbg(&dev->pdev->dev, "otg_set_power\n");
1266 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1267 return otg_set_power(dev->transceiver, mA);
1270 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1271 return -ENOTSUPP;
1275 /* D+ pullup, software-controlled connect/disconnect to USB host */
1276 static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
1278 struct langwell_udc *dev;
1279 u32 usbcmd;
1280 unsigned long flags;
1282 if (!_gadget)
1283 return -ENODEV;
1285 dev = container_of(_gadget, struct langwell_udc, gadget);
1287 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1289 spin_lock_irqsave(&dev->lock, flags);
1290 dev->softconnected = (is_on != 0);
1292 if (dev->driver && dev->softconnected && dev->vbus_active) {
1293 usbcmd = readl(&dev->op_regs->usbcmd);
1294 usbcmd |= CMD_RUNSTOP;
1295 writel(usbcmd, &dev->op_regs->usbcmd);
1296 } else {
1297 usbcmd = readl(&dev->op_regs->usbcmd);
1298 usbcmd &= ~CMD_RUNSTOP;
1299 writel(usbcmd, &dev->op_regs->usbcmd);
1301 spin_unlock_irqrestore(&dev->lock, flags);
1303 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1304 return 0;
1307 static int langwell_start(struct usb_gadget *g,
1308 struct usb_gadget_driver *driver);
1310 static int langwell_stop(struct usb_gadget *g,
1311 struct usb_gadget_driver *driver);
1313 /* device controller usb_gadget_ops structure */
1314 static const struct usb_gadget_ops langwell_ops = {
1316 /* returns the current frame number */
1317 .get_frame = langwell_get_frame,
1319 /* tries to wake up the host connected to this gadget */
1320 .wakeup = langwell_wakeup,
1322 /* set the device selfpowered feature, always selfpowered */
1323 /* .set_selfpowered = langwell_set_selfpowered, */
1325 /* notify controller that VBUS is powered or not */
1326 .vbus_session = langwell_vbus_session,
1328 /* constrain controller's VBUS power usage */
1329 .vbus_draw = langwell_vbus_draw,
1331 /* D+ pullup, software-controlled connect/disconnect to USB host */
1332 .pullup = langwell_pullup,
1334 .udc_start = langwell_start,
1335 .udc_stop = langwell_stop,
1339 /*-------------------------------------------------------------------------*/
1341 /* device controller operations */
1343 /* reset device controller */
1344 static int langwell_udc_reset(struct langwell_udc *dev)
1346 u32 usbcmd, usbmode, devlc, endpointlistaddr;
1347 u8 devlc_byte0, devlc_byte2;
1348 unsigned long timeout;
1350 if (!dev)
1351 return -EINVAL;
1353 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1355 /* set controller to stop state */
1356 usbcmd = readl(&dev->op_regs->usbcmd);
1357 usbcmd &= ~CMD_RUNSTOP;
1358 writel(usbcmd, &dev->op_regs->usbcmd);
1360 /* reset device controller */
1361 usbcmd = readl(&dev->op_regs->usbcmd);
1362 usbcmd |= CMD_RST;
1363 writel(usbcmd, &dev->op_regs->usbcmd);
1365 /* wait for reset to complete */
1366 timeout = jiffies + RESET_TIMEOUT;
1367 while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
1368 if (time_after(jiffies, timeout)) {
1369 dev_err(&dev->pdev->dev, "device reset timeout\n");
1370 return -ETIMEDOUT;
1372 cpu_relax();
1375 /* set controller to device mode */
1376 usbmode = readl(&dev->op_regs->usbmode);
1377 usbmode |= MODE_DEVICE;
1379 /* turn setup lockout off, require setup tripwire in usbcmd */
1380 usbmode |= MODE_SLOM;
1382 writel(usbmode, &dev->op_regs->usbmode);
1383 usbmode = readl(&dev->op_regs->usbmode);
1384 dev_vdbg(&dev->pdev->dev, "usbmode=0x%08x\n", usbmode);
1386 /* Write-Clear setup status */
1387 writel(0, &dev->op_regs->usbsts);
1389 /* if support USB LPM, ACK all LPM token */
1390 if (dev->lpm) {
1391 devlc = readl(&dev->op_regs->devlc);
1392 dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1393 /* FIXME: workaround for Langwell A1/A2/A3 sighting */
1394 devlc &= ~LPM_STL; /* don't STALL LPM token */
1395 devlc &= ~LPM_NYT_ACK; /* ACK LPM token */
1396 devlc_byte0 = devlc & 0xff;
1397 devlc_byte2 = (devlc >> 16) & 0xff;
1398 writeb(devlc_byte0, (u8 *)&dev->op_regs->devlc);
1399 writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
1400 devlc = readl(&dev->op_regs->devlc);
1401 dev_vdbg(&dev->pdev->dev,
1402 "ACK LPM token, devlc = 0x%08x\n", devlc);
1405 /* fill endpointlistaddr register */
1406 endpointlistaddr = dev->ep_dqh_dma;
1407 endpointlistaddr &= ENDPOINTLISTADDR_MASK;
1408 writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
1410 dev_vdbg(&dev->pdev->dev,
1411 "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
1412 dev->ep_dqh, endpointlistaddr,
1413 readl(&dev->op_regs->endpointlistaddr));
1414 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1415 return 0;
1419 /* reinitialize device controller endpoints */
1420 static int eps_reinit(struct langwell_udc *dev)
1422 struct langwell_ep *ep;
1423 char name[14];
1424 int i;
1426 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1428 /* initialize ep0 */
1429 ep = &dev->ep[0];
1430 ep->dev = dev;
1431 strncpy(ep->name, "ep0", sizeof(ep->name));
1432 ep->ep.name = ep->name;
1433 ep->ep.ops = &langwell_ep_ops;
1434 ep->stopped = 0;
1435 ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1436 ep->ep_num = 0;
1437 ep->desc = &langwell_ep0_desc;
1438 INIT_LIST_HEAD(&ep->queue);
1440 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1442 /* initialize other endpoints */
1443 for (i = 2; i < dev->ep_max; i++) {
1444 ep = &dev->ep[i];
1445 if (i % 2)
1446 snprintf(name, sizeof(name), "ep%din", i / 2);
1447 else
1448 snprintf(name, sizeof(name), "ep%dout", i / 2);
1449 ep->dev = dev;
1450 strncpy(ep->name, name, sizeof(ep->name));
1451 ep->ep.name = ep->name;
1453 ep->ep.ops = &langwell_ep_ops;
1454 ep->stopped = 0;
1455 ep->ep.maxpacket = (unsigned short) ~0;
1456 ep->ep_num = i / 2;
1458 INIT_LIST_HEAD(&ep->queue);
1459 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
1462 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1463 return 0;
1467 /* enable interrupt and set controller to run state */
1468 static void langwell_udc_start(struct langwell_udc *dev)
1470 u32 usbintr, usbcmd;
1471 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1473 /* enable interrupts */
1474 usbintr = INTR_ULPIE /* ULPI */
1475 | INTR_SLE /* suspend */
1476 /* | INTR_SRE SOF received */
1477 | INTR_URE /* USB reset */
1478 | INTR_AAE /* async advance */
1479 | INTR_SEE /* system error */
1480 | INTR_FRE /* frame list rollover */
1481 | INTR_PCE /* port change detect */
1482 | INTR_UEE /* USB error interrupt */
1483 | INTR_UE; /* USB interrupt */
1484 writel(usbintr, &dev->op_regs->usbintr);
1486 /* clear stopped bit */
1487 dev->stopped = 0;
1489 /* set controller to run */
1490 usbcmd = readl(&dev->op_regs->usbcmd);
1491 usbcmd |= CMD_RUNSTOP;
1492 writel(usbcmd, &dev->op_regs->usbcmd);
1494 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1498 /* disable interrupt and set controller to stop state */
1499 static void langwell_udc_stop(struct langwell_udc *dev)
1501 u32 usbcmd;
1503 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1505 /* disable all interrupts */
1506 writel(0, &dev->op_regs->usbintr);
1508 /* set stopped bit */
1509 dev->stopped = 1;
1511 /* set controller to stop state */
1512 usbcmd = readl(&dev->op_regs->usbcmd);
1513 usbcmd &= ~CMD_RUNSTOP;
1514 writel(usbcmd, &dev->op_regs->usbcmd);
1516 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1520 /* stop all USB activities */
1521 static void stop_activity(struct langwell_udc *dev)
1523 struct langwell_ep *ep;
1524 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1526 nuke(&dev->ep[0], -ESHUTDOWN);
1528 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1529 nuke(ep, -ESHUTDOWN);
1532 /* report disconnect; the driver is already quiesced */
1533 if (dev->driver) {
1534 spin_unlock(&dev->lock);
1535 dev->driver->disconnect(&dev->gadget);
1536 spin_lock(&dev->lock);
1539 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1543 /*-------------------------------------------------------------------------*/
1545 /* device "function" sysfs attribute file */
1546 static ssize_t show_function(struct device *_dev,
1547 struct device_attribute *attr, char *buf)
1549 struct langwell_udc *dev = dev_get_drvdata(_dev);
1551 if (!dev->driver || !dev->driver->function
1552 || strlen(dev->driver->function) > PAGE_SIZE)
1553 return 0;
1555 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1557 static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
1560 static inline enum usb_device_speed lpm_device_speed(u32 reg)
1562 switch (LPM_PSPD(reg)) {
1563 case LPM_SPEED_HIGH:
1564 return USB_SPEED_HIGH;
1565 case LPM_SPEED_FULL:
1566 return USB_SPEED_FULL;
1567 case LPM_SPEED_LOW:
1568 return USB_SPEED_LOW;
1569 default:
1570 return USB_SPEED_UNKNOWN;
1574 /* device "langwell_udc" sysfs attribute file */
1575 static ssize_t show_langwell_udc(struct device *_dev,
1576 struct device_attribute *attr, char *buf)
1578 struct langwell_udc *dev = dev_get_drvdata(_dev);
1579 struct langwell_request *req;
1580 struct langwell_ep *ep = NULL;
1581 char *next;
1582 unsigned size;
1583 unsigned t;
1584 unsigned i;
1585 unsigned long flags;
1586 u32 tmp_reg;
1588 next = buf;
1589 size = PAGE_SIZE;
1590 spin_lock_irqsave(&dev->lock, flags);
1592 /* driver basic information */
1593 t = scnprintf(next, size,
1594 DRIVER_DESC "\n"
1595 "%s version: %s\n"
1596 "Gadget driver: %s\n\n",
1597 driver_name, DRIVER_VERSION,
1598 dev->driver ? dev->driver->driver.name : "(none)");
1599 size -= t;
1600 next += t;
1602 /* device registers */
1603 tmp_reg = readl(&dev->op_regs->usbcmd);
1604 t = scnprintf(next, size,
1605 "USBCMD reg:\n"
1606 "SetupTW: %d\n"
1607 "Run/Stop: %s\n\n",
1608 (tmp_reg & CMD_SUTW) ? 1 : 0,
1609 (tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
1610 size -= t;
1611 next += t;
1613 tmp_reg = readl(&dev->op_regs->usbsts);
1614 t = scnprintf(next, size,
1615 "USB Status Reg:\n"
1616 "Device Suspend: %d\n"
1617 "Reset Received: %d\n"
1618 "System Error: %s\n"
1619 "USB Error Interrupt: %s\n\n",
1620 (tmp_reg & STS_SLI) ? 1 : 0,
1621 (tmp_reg & STS_URI) ? 1 : 0,
1622 (tmp_reg & STS_SEI) ? "Error" : "No error",
1623 (tmp_reg & STS_UEI) ? "Error detected" : "No error");
1624 size -= t;
1625 next += t;
1627 tmp_reg = readl(&dev->op_regs->usbintr);
1628 t = scnprintf(next, size,
1629 "USB Intrrupt Enable Reg:\n"
1630 "Sleep Enable: %d\n"
1631 "SOF Received Enable: %d\n"
1632 "Reset Enable: %d\n"
1633 "System Error Enable: %d\n"
1634 "Port Change Dectected Enable: %d\n"
1635 "USB Error Intr Enable: %d\n"
1636 "USB Intr Enable: %d\n\n",
1637 (tmp_reg & INTR_SLE) ? 1 : 0,
1638 (tmp_reg & INTR_SRE) ? 1 : 0,
1639 (tmp_reg & INTR_URE) ? 1 : 0,
1640 (tmp_reg & INTR_SEE) ? 1 : 0,
1641 (tmp_reg & INTR_PCE) ? 1 : 0,
1642 (tmp_reg & INTR_UEE) ? 1 : 0,
1643 (tmp_reg & INTR_UE) ? 1 : 0);
1644 size -= t;
1645 next += t;
1647 tmp_reg = readl(&dev->op_regs->frindex);
1648 t = scnprintf(next, size,
1649 "USB Frame Index Reg:\n"
1650 "Frame Number is 0x%08x\n\n",
1651 (tmp_reg & FRINDEX_MASK));
1652 size -= t;
1653 next += t;
1655 tmp_reg = readl(&dev->op_regs->deviceaddr);
1656 t = scnprintf(next, size,
1657 "USB Device Address Reg:\n"
1658 "Device Addr is 0x%x\n\n",
1659 USBADR(tmp_reg));
1660 size -= t;
1661 next += t;
1663 tmp_reg = readl(&dev->op_regs->endpointlistaddr);
1664 t = scnprintf(next, size,
1665 "USB Endpoint List Address Reg:\n"
1666 "Endpoint List Pointer is 0x%x\n\n",
1667 EPBASE(tmp_reg));
1668 size -= t;
1669 next += t;
1671 tmp_reg = readl(&dev->op_regs->portsc1);
1672 t = scnprintf(next, size,
1673 "USB Port Status & Control Reg:\n"
1674 "Port Reset: %s\n"
1675 "Port Suspend Mode: %s\n"
1676 "Over-current Change: %s\n"
1677 "Port Enable/Disable Change: %s\n"
1678 "Port Enabled/Disabled: %s\n"
1679 "Current Connect Status: %s\n"
1680 "LPM Suspend Status: %s\n\n",
1681 (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
1682 (tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
1683 (tmp_reg & PORTS_OCC) ? "Detected" : "No",
1684 (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
1685 (tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
1686 (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached",
1687 (tmp_reg & PORTS_SLP) ? "LPM L1" : "LPM L0");
1688 size -= t;
1689 next += t;
1691 tmp_reg = readl(&dev->op_regs->devlc);
1692 t = scnprintf(next, size,
1693 "Device LPM Control Reg:\n"
1694 "Parallel Transceiver : %d\n"
1695 "Serial Transceiver : %d\n"
1696 "Port Speed: %s\n"
1697 "Port Force Full Speed Connenct: %s\n"
1698 "PHY Low Power Suspend Clock: %s\n"
1699 "BmAttributes: %d\n\n",
1700 LPM_PTS(tmp_reg),
1701 (tmp_reg & LPM_STS) ? 1 : 0,
1702 usb_speed_string(lpm_device_speed(tmp_reg)),
1703 (tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
1704 (tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
1705 LPM_BA(tmp_reg));
1706 size -= t;
1707 next += t;
1709 tmp_reg = readl(&dev->op_regs->usbmode);
1710 t = scnprintf(next, size,
1711 "USB Mode Reg:\n"
1712 "Controller Mode is : %s\n\n", ({
1713 char *s;
1714 switch (MODE_CM(tmp_reg)) {
1715 case MODE_IDLE:
1716 s = "Idle"; break;
1717 case MODE_DEVICE:
1718 s = "Device Controller"; break;
1719 case MODE_HOST:
1720 s = "Host Controller"; break;
1721 default:
1722 s = "None"; break;
1725 }));
1726 size -= t;
1727 next += t;
1729 tmp_reg = readl(&dev->op_regs->endptsetupstat);
1730 t = scnprintf(next, size,
1731 "Endpoint Setup Status Reg:\n"
1732 "SETUP on ep 0x%04x\n\n",
1733 tmp_reg & SETUPSTAT_MASK);
1734 size -= t;
1735 next += t;
1737 for (i = 0; i < dev->ep_max / 2; i++) {
1738 tmp_reg = readl(&dev->op_regs->endptctrl[i]);
1739 t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
1740 i, tmp_reg);
1741 size -= t;
1742 next += t;
1744 tmp_reg = readl(&dev->op_regs->endptprime);
1745 t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
1746 size -= t;
1747 next += t;
1749 /* langwell_udc, langwell_ep, langwell_request structure information */
1750 ep = &dev->ep[0];
1751 t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
1752 ep->ep.name, ep->ep.maxpacket, ep->ep_num);
1753 size -= t;
1754 next += t;
1756 if (list_empty(&ep->queue)) {
1757 t = scnprintf(next, size, "its req queue is empty\n\n");
1758 size -= t;
1759 next += t;
1760 } else {
1761 list_for_each_entry(req, &ep->queue, queue) {
1762 t = scnprintf(next, size,
1763 "req %p actual 0x%x length 0x%x buf %p\n",
1764 &req->req, req->req.actual,
1765 req->req.length, req->req.buf);
1766 size -= t;
1767 next += t;
1770 /* other gadget->eplist ep */
1771 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1772 if (ep->desc) {
1773 t = scnprintf(next, size,
1774 "\n%s MaxPacketSize: 0x%x, "
1775 "ep_num: %d\n",
1776 ep->ep.name, ep->ep.maxpacket,
1777 ep->ep_num);
1778 size -= t;
1779 next += t;
1781 if (list_empty(&ep->queue)) {
1782 t = scnprintf(next, size,
1783 "its req queue is empty\n\n");
1784 size -= t;
1785 next += t;
1786 } else {
1787 list_for_each_entry(req, &ep->queue, queue) {
1788 t = scnprintf(next, size,
1789 "req %p actual 0x%x length "
1790 "0x%x buf %p\n",
1791 &req->req, req->req.actual,
1792 req->req.length, req->req.buf);
1793 size -= t;
1794 next += t;
1800 spin_unlock_irqrestore(&dev->lock, flags);
1801 return PAGE_SIZE - size;
1803 static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
1806 /* device "remote_wakeup" sysfs attribute file */
1807 static ssize_t store_remote_wakeup(struct device *_dev,
1808 struct device_attribute *attr, const char *buf, size_t count)
1810 struct langwell_udc *dev = dev_get_drvdata(_dev);
1811 unsigned long flags;
1812 ssize_t rc = count;
1814 if (count > 2)
1815 return -EINVAL;
1817 if (count > 0 && buf[count-1] == '\n')
1818 ((char *) buf)[count-1] = 0;
1820 if (buf[0] != '1')
1821 return -EINVAL;
1823 /* force remote wakeup enabled in case gadget driver doesn't support */
1824 spin_lock_irqsave(&dev->lock, flags);
1825 dev->remote_wakeup = 1;
1826 dev->dev_status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
1827 spin_unlock_irqrestore(&dev->lock, flags);
1829 langwell_wakeup(&dev->gadget);
1831 return rc;
1833 static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
1836 /*-------------------------------------------------------------------------*/
1839 * when a driver is successfully registered, it will receive
1840 * control requests including set_configuration(), which enables
1841 * non-control requests. then usb traffic follows until a
1842 * disconnect is reported. then a host may connect again, or
1843 * the driver might get unbound.
1846 static int langwell_start(struct usb_gadget *g,
1847 struct usb_gadget_driver *driver)
1849 struct langwell_udc *dev = gadget_to_langwell(g);
1850 unsigned long flags;
1851 int retval;
1853 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1855 spin_lock_irqsave(&dev->lock, flags);
1857 /* hook up the driver ... */
1858 driver->driver.bus = NULL;
1859 dev->driver = driver;
1860 dev->gadget.dev.driver = &driver->driver;
1862 spin_unlock_irqrestore(&dev->lock, flags);
1864 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
1865 if (retval)
1866 goto err;
1868 dev->usb_state = USB_STATE_ATTACHED;
1869 dev->ep0_state = WAIT_FOR_SETUP;
1870 dev->ep0_dir = USB_DIR_OUT;
1872 /* enable interrupt and set controller to run state */
1873 if (dev->got_irq)
1874 langwell_udc_start(dev);
1876 dev_vdbg(&dev->pdev->dev,
1877 "After langwell_udc_start(), print all registers:\n");
1878 print_all_registers(dev);
1880 dev_info(&dev->pdev->dev, "register driver: %s\n",
1881 driver->driver.name);
1882 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1884 return 0;
1886 err:
1887 dev->gadget.dev.driver = NULL;
1888 dev->driver = NULL;
1890 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1892 return retval;
1895 /* unregister gadget driver */
1896 static int langwell_stop(struct usb_gadget *g,
1897 struct usb_gadget_driver *driver)
1899 struct langwell_udc *dev = gadget_to_langwell(g);
1900 unsigned long flags;
1902 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1904 /* exit PHY low power suspend */
1905 if (dev->pdev->device != 0x0829)
1906 langwell_phy_low_power(dev, 0);
1908 /* unbind OTG transceiver */
1909 if (dev->transceiver)
1910 (void)otg_set_peripheral(dev->transceiver, 0);
1912 /* disable interrupt and set controller to stop state */
1913 langwell_udc_stop(dev);
1915 dev->usb_state = USB_STATE_ATTACHED;
1916 dev->ep0_state = WAIT_FOR_SETUP;
1917 dev->ep0_dir = USB_DIR_OUT;
1919 spin_lock_irqsave(&dev->lock, flags);
1921 /* stop all usb activities */
1922 dev->gadget.speed = USB_SPEED_UNKNOWN;
1923 dev->gadget.dev.driver = NULL;
1924 dev->driver = NULL;
1925 stop_activity(dev);
1926 spin_unlock_irqrestore(&dev->lock, flags);
1928 device_remove_file(&dev->pdev->dev, &dev_attr_function);
1930 dev_info(&dev->pdev->dev, "unregistered driver '%s'\n",
1931 driver->driver.name);
1932 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1934 return 0;
1937 /*-------------------------------------------------------------------------*/
1940 * setup tripwire is used as a semaphore to ensure that the setup data
1941 * payload is extracted from a dQH without being corrupted
1943 static void setup_tripwire(struct langwell_udc *dev)
1945 u32 usbcmd,
1946 endptsetupstat;
1947 unsigned long timeout;
1948 struct langwell_dqh *dqh;
1950 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1952 /* ep0 OUT dQH */
1953 dqh = &dev->ep_dqh[EP_DIR_OUT];
1955 /* Write-Clear endptsetupstat */
1956 endptsetupstat = readl(&dev->op_regs->endptsetupstat);
1957 writel(endptsetupstat, &dev->op_regs->endptsetupstat);
1959 /* wait until endptsetupstat is cleared */
1960 timeout = jiffies + SETUPSTAT_TIMEOUT;
1961 while (readl(&dev->op_regs->endptsetupstat)) {
1962 if (time_after(jiffies, timeout)) {
1963 dev_err(&dev->pdev->dev, "setup_tripwire timeout\n");
1964 break;
1966 cpu_relax();
1969 /* while a hazard exists when setup packet arrives */
1970 do {
1971 /* set setup tripwire bit */
1972 usbcmd = readl(&dev->op_regs->usbcmd);
1973 writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
1975 /* copy the setup packet to local buffer */
1976 memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
1977 } while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
1979 /* Write-Clear setup tripwire bit */
1980 usbcmd = readl(&dev->op_regs->usbcmd);
1981 writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
1983 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1987 /* protocol ep0 stall, will automatically be cleared on new transaction */
1988 static void ep0_stall(struct langwell_udc *dev)
1990 u32 endptctrl;
1992 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1994 /* set TX and RX to stall */
1995 endptctrl = readl(&dev->op_regs->endptctrl[0]);
1996 endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
1997 writel(endptctrl, &dev->op_regs->endptctrl[0]);
1999 /* update ep0 state */
2000 dev->ep0_state = WAIT_FOR_SETUP;
2001 dev->ep0_dir = USB_DIR_OUT;
2003 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2007 /* PRIME a status phase for ep0 */
2008 static int prime_status_phase(struct langwell_udc *dev, int dir)
2010 struct langwell_request *req;
2011 struct langwell_ep *ep;
2012 int status = 0;
2014 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2016 if (dir == EP_DIR_IN)
2017 dev->ep0_dir = USB_DIR_IN;
2018 else
2019 dev->ep0_dir = USB_DIR_OUT;
2021 ep = &dev->ep[0];
2022 dev->ep0_state = WAIT_FOR_OUT_STATUS;
2024 req = dev->status_req;
2026 req->ep = ep;
2027 req->req.length = 0;
2028 req->req.status = -EINPROGRESS;
2029 req->req.actual = 0;
2030 req->req.complete = NULL;
2031 req->dtd_count = 0;
2033 if (!req_to_dtd(req))
2034 status = queue_dtd(ep, req);
2035 else
2036 return -ENOMEM;
2038 if (status)
2039 dev_err(&dev->pdev->dev, "can't queue ep0 status request\n");
2041 list_add_tail(&req->queue, &ep->queue);
2043 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2044 return status;
2048 /* SET_ADDRESS request routine */
2049 static void set_address(struct langwell_udc *dev, u16 value,
2050 u16 index, u16 length)
2052 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2054 /* save the new address to device struct */
2055 dev->dev_addr = (u8) value;
2056 dev_vdbg(&dev->pdev->dev, "dev->dev_addr = %d\n", dev->dev_addr);
2058 /* update usb state */
2059 dev->usb_state = USB_STATE_ADDRESS;
2061 /* STATUS phase */
2062 if (prime_status_phase(dev, EP_DIR_IN))
2063 ep0_stall(dev);
2065 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2069 /* return endpoint by windex */
2070 static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
2071 u16 wIndex)
2073 struct langwell_ep *ep;
2074 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2076 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2077 return &dev->ep[0];
2079 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2080 u8 bEndpointAddress;
2081 if (!ep->desc)
2082 continue;
2084 bEndpointAddress = ep->desc->bEndpointAddress;
2085 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2086 continue;
2088 if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
2089 == (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
2090 return ep;
2093 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2094 return NULL;
2098 /* return whether endpoint is stalled, 0: not stalled; 1: stalled */
2099 static int ep_is_stall(struct langwell_ep *ep)
2101 struct langwell_udc *dev = ep->dev;
2102 u32 endptctrl;
2103 int retval;
2105 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2107 endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
2108 if (is_in(ep))
2109 retval = endptctrl & EPCTRL_TXS ? 1 : 0;
2110 else
2111 retval = endptctrl & EPCTRL_RXS ? 1 : 0;
2113 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2114 return retval;
2118 /* GET_STATUS request routine */
2119 static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
2120 u16 index, u16 length)
2122 struct langwell_request *req;
2123 struct langwell_ep *ep;
2124 u16 status_data = 0; /* 16 bits cpu view status data */
2125 int status = 0;
2127 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2129 ep = &dev->ep[0];
2131 if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
2132 /* get device status */
2133 status_data = dev->dev_status;
2134 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
2135 /* get interface status */
2136 status_data = 0;
2137 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
2138 /* get endpoint status */
2139 struct langwell_ep *epn;
2140 epn = get_ep_by_windex(dev, index);
2141 /* stall if endpoint doesn't exist */
2142 if (!epn)
2143 goto stall;
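/*
 * Note: USB_ENDPOINT_HALT is the feature selector (0), so the shift below
 * places the stall flag in bit 0 of the endpoint status word, which is
 * where GET_STATUS reports Halt.
 */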
2145 status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
2148 dev_dbg(&dev->pdev->dev, "get status data: 0x%04x\n", status_data);
2150 dev->ep0_dir = USB_DIR_IN;
2152 /* borrow the per device status_req */
2153 req = dev->status_req;
2155 /* fill in the request structure */
2156 *((u16 *) req->req.buf) = cpu_to_le16(status_data);
2157 req->ep = ep;
2158 req->req.length = 2;
2159 req->req.status = -EINPROGRESS;
2160 req->req.actual = 0;
2161 req->req.complete = NULL;
2162 req->dtd_count = 0;
2164 /* prime the data phase */
2165 if (!req_to_dtd(req))
2166 status = queue_dtd(ep, req);
2167 else /* no mem */
2168 goto stall;
2170 if (status) {
2171 dev_err(&dev->pdev->dev,
2172 "response error on GET_STATUS request\n");
2173 goto stall;
2176 list_add_tail(&req->queue, &ep->queue);
2177 dev->ep0_state = DATA_STATE_XMIT;
2179 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2180 return;
2181 stall:
2182 ep0_stall(dev);
2183 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2187 /* setup packet interrupt handler */
2188 static void handle_setup_packet(struct langwell_udc *dev,
2189 struct usb_ctrlrequest *setup)
2191 u16 wValue = le16_to_cpu(setup->wValue);
2192 u16 wIndex = le16_to_cpu(setup->wIndex);
2193 u16 wLength = le16_to_cpu(setup->wLength);
2194 u32 portsc1;
2196 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2198 /* ep0 fifo flush */
2199 nuke(&dev->ep[0], -ESHUTDOWN);
2201 dev_dbg(&dev->pdev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
2202 setup->bRequestType, setup->bRequest,
2203 wValue, wIndex, wLength);
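/*
 * Note: bRequestType 0x21 is OUT | Class | Interface and 0xa1 is
 * IN | Class | Interface; together with bRequest 0x00/0x01 these are the
 * CDC SEND_ENCAPSULATED_COMMAND / GET_ENCAPSULATED_RESPONSE requests used
 * by RNDIS, which are simply handed to the gadget driver.
 */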
2205 /* RNDIS gadget delegate */
2206 if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
2207 /* USB_CDC_SEND_ENCAPSULATED_COMMAND */
2208 goto delegate;
2211 /* USB_CDC_GET_ENCAPSULATED_RESPONSE */
2212 if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
2213 /* USB_CDC_GET_ENCAPSULATED_RESPONSE */
2214 goto delegate;
2217 /* We process some standard setup requests here */
2218 switch (setup->bRequest) {
2219 case USB_REQ_GET_STATUS:
2220 dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_GET_STATUS\n");
2221 /* get status, DATA and STATUS phase */
2222 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2223 != (USB_DIR_IN | USB_TYPE_STANDARD))
2224 break;
2225 get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
2226 goto end;
2228 case USB_REQ_SET_ADDRESS:
2229 dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_SET_ADDRESS\n");
2230 /* STATUS phase */
2231 if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
2232 | USB_RECIP_DEVICE))
2233 break;
2234 set_address(dev, wValue, wIndex, wLength);
2235 goto end;
2237 case USB_REQ_CLEAR_FEATURE:
2238 case USB_REQ_SET_FEATURE:
2239 /* STATUS phase */
2241 int rc = -EOPNOTSUPP;
2242 if (setup->bRequest == USB_REQ_SET_FEATURE)
2243 dev_dbg(&dev->pdev->dev,
2244 "SETUP: USB_REQ_SET_FEATURE\n");
2245 else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
2246 dev_dbg(&dev->pdev->dev,
2247 "SETUP: USB_REQ_CLEAR_FEATURE\n");
2249 if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
2250 == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
2251 struct langwell_ep *epn;
2252 epn = get_ep_by_windex(dev, wIndex);
2253 /* stall if endpoint doesn't exist */
2254 if (!epn) {
2255 ep0_stall(dev);
2256 goto end;
2259 if (wValue != 0 || wLength != 0
2260 || epn->ep_num > dev->ep_max)
2261 break;
2263 spin_unlock(&dev->lock);
2264 rc = langwell_ep_set_halt(&epn->ep,
2265 (setup->bRequest == USB_REQ_SET_FEATURE)
2266 ? 1 : 0);
2267 spin_lock(&dev->lock);
2269 } else if ((setup->bRequestType & (USB_RECIP_MASK
2270 | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
2271 | USB_TYPE_STANDARD)) {
2272 rc = 0;
2273 switch (wValue) {
2274 case USB_DEVICE_REMOTE_WAKEUP:
2275 if (setup->bRequest == USB_REQ_SET_FEATURE) {
2276 dev->remote_wakeup = 1;
2277 dev->dev_status |= (1 << wValue);
2278 } else {
2279 dev->remote_wakeup = 0;
2280 dev->dev_status &= ~(1 << wValue);
2282 break;
2283 case USB_DEVICE_TEST_MODE:
2284 dev_dbg(&dev->pdev->dev, "SETUP: TEST MODE\n");
2285 if ((wIndex & 0xff) ||
2286 (dev->gadget.speed != USB_SPEED_HIGH))
2287 ep0_stall(dev);
2289 switch (wIndex >> 8) {
2290 case TEST_J:
2291 case TEST_K:
2292 case TEST_SE0_NAK:
2293 case TEST_PACKET:
2294 case TEST_FORCE_EN:
2295 if (prime_status_phase(dev, EP_DIR_IN))
2296 ep0_stall(dev);
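/*
 * Note: per USB 2.0 the test selector is carried in the high byte of
 * wIndex.  Shifting the 0xf00 field left by 8 lands it in bits 19:16 of
 * PORTSC1, which is presumably the port test control field (as in EHCI)
 * on this controller.
 */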
2297 portsc1 = readl(&dev->op_regs->portsc1);
2298 portsc1 |= (wIndex & 0xf00) << 8;
2299 writel(portsc1, &dev->op_regs->portsc1);
2300 goto end;
2301 default:
2302 rc = -EOPNOTSUPP;
2304 break;
2305 default:
2306 rc = -EOPNOTSUPP;
2307 break;
2310 if (!gadget_is_otg(&dev->gadget))
2311 break;
2312 else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
2313 dev->gadget.b_hnp_enable = 1;
2314 else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
2315 dev->gadget.a_hnp_support = 1;
2316 else if (setup->bRequest ==
2317 USB_DEVICE_A_ALT_HNP_SUPPORT)
2318 dev->gadget.a_alt_hnp_support = 1;
2319 else
2320 break;
2321 } else
2322 break;
2324 if (rc == 0) {
2325 if (prime_status_phase(dev, EP_DIR_IN))
2326 ep0_stall(dev);
2328 goto end;
2331 case USB_REQ_GET_DESCRIPTOR:
2332 dev_dbg(&dev->pdev->dev,
2333 "SETUP: USB_REQ_GET_DESCRIPTOR\n");
2334 goto delegate;
2336 case USB_REQ_SET_DESCRIPTOR:
2337 dev_dbg(&dev->pdev->dev,
2338 "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
2339 goto delegate;
2341 case USB_REQ_GET_CONFIGURATION:
2342 dev_dbg(&dev->pdev->dev,
2343 "SETUP: USB_REQ_GET_CONFIGURATION\n");
2344 goto delegate;
2346 case USB_REQ_SET_CONFIGURATION:
2347 dev_dbg(&dev->pdev->dev,
2348 "SETUP: USB_REQ_SET_CONFIGURATION\n");
2349 goto delegate;
2351 case USB_REQ_GET_INTERFACE:
2352 dev_dbg(&dev->pdev->dev,
2353 "SETUP: USB_REQ_GET_INTERFACE\n");
2354 goto delegate;
2356 case USB_REQ_SET_INTERFACE:
2357 dev_dbg(&dev->pdev->dev,
2358 "SETUP: USB_REQ_SET_INTERFACE\n");
2359 goto delegate;
2361 case USB_REQ_SYNCH_FRAME:
2362 dev_dbg(&dev->pdev->dev,
2363 "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
2364 goto delegate;
2366 default:
2367 /* delegate USB standard requests to the gadget driver */
2368 goto delegate;
2369 delegate:
2370 /* USB requests handled by gadget */
2371 if (wLength) {
2372 /* DATA phase from gadget, STATUS phase from udc */
2373 dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
2374 ? USB_DIR_IN : USB_DIR_OUT;
2375 dev_vdbg(&dev->pdev->dev,
2376 "dev->ep0_dir = 0x%x, wLength = %d\n",
2377 dev->ep0_dir, wLength);
2378 spin_unlock(&dev->lock);
2379 if (dev->driver->setup(&dev->gadget,
2380 &dev->local_setup_buff) < 0)
2381 ep0_stall(dev);
2382 spin_lock(&dev->lock);
2383 dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
2384 ? DATA_STATE_XMIT : DATA_STATE_RECV;
2385 } else {
2386 /* no DATA phase, IN STATUS phase from gadget */
2387 dev->ep0_dir = USB_DIR_IN;
2388 dev_vdbg(&dev->pdev->dev,
2389 "dev->ep0_dir = 0x%x, wLength = %d\n",
2390 dev->ep0_dir, wLength);
2391 spin_unlock(&dev->lock);
2392 if (dev->driver->setup(&dev->gadget,
2393 &dev->local_setup_buff) < 0)
2394 ep0_stall(dev);
2395 spin_lock(&dev->lock);
2396 dev->ep0_state = WAIT_FOR_OUT_STATUS;
2398 break;
2400 end:
2401 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2405 /* transfer completion, process endpoint request and free the completed dTDs
2406 * for this request
2408 static int process_ep_req(struct langwell_udc *dev, int index,
2409 struct langwell_request *curr_req)
2411 struct langwell_dtd *curr_dtd;
2412 struct langwell_dqh *curr_dqh;
2413 int td_complete, actual, remaining_length;
2414 int i, dir;
2415 u8 dtd_status = 0;
2416 int retval = 0;
2418 curr_dqh = &dev->ep_dqh[index];
2419 dir = index % 2;
2421 curr_dtd = curr_req->head;
2422 td_complete = 0;
2423 actual = curr_req->req.length;
2425 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
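/*
 * Note: "actual" starts as the requested length; each dTD's remaining
 * byte count (dtd_total) is subtracted while walking the chain, so on
 * success it ends up as the number of bytes actually transferred.  A dTD
 * still marked ACTIVE means the hardware has not finished it yet and the
 * request must be revisited later (return 1); HALTED, buffer and
 * transaction errors map to -EPIPE, -EPROTO and -EILSEQ respectively.
 */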
2427 for (i = 0; i < curr_req->dtd_count; i++) {
2429 /* command execution states by dTD */
2430 dtd_status = curr_dtd->dtd_status;
2432 barrier();
2433 remaining_length = le16_to_cpu(curr_dtd->dtd_total);
2434 actual -= remaining_length;
2436 if (!dtd_status) {
2437 /* transfers completed successfully */
2438 if (!remaining_length) {
2439 td_complete++;
2440 dev_vdbg(&dev->pdev->dev,
2441 "dTD transmitted successfully\n");
2442 } else {
2443 if (dir) {
2444 dev_vdbg(&dev->pdev->dev,
2445 "TX dTD has residual data\n");
2446 retval = -EPROTO;
2447 break;
2449 } else {
2450 td_complete++;
2451 break;
2454 } else {
2455 /* transfers completed with errors */
2456 if (dtd_status & DTD_STS_ACTIVE) {
2457 dev_dbg(&dev->pdev->dev,
2458 "dTD status ACTIVE dQH[%d]\n", index);
2459 retval = 1;
2460 return retval;
2461 } else if (dtd_status & DTD_STS_HALTED) {
2462 dev_err(&dev->pdev->dev,
2463 "dTD error %08x dQH[%d]\n",
2464 dtd_status, index);
2465 /* clear the errors and halt condition */
2466 curr_dqh->dtd_status = 0;
2467 retval = -EPIPE;
2468 break;
2469 } else if (dtd_status & DTD_STS_DBE) {
2470 dev_dbg(&dev->pdev->dev,
2471 "data buffer (overflow) error\n");
2472 retval = -EPROTO;
2473 break;
2474 } else if (dtd_status & DTD_STS_TRE) {
2475 dev_dbg(&dev->pdev->dev,
2476 "transaction(ISO) error\n");
2477 retval = -EILSEQ;
2478 break;
2479 } else
2480 dev_err(&dev->pdev->dev,
2481 "unknown error (0x%x)!\n",
2482 dtd_status);
2485 if (i != curr_req->dtd_count - 1)
2486 curr_dtd = (struct langwell_dtd *)
2487 curr_dtd->next_dtd_virt;
2490 if (retval)
2491 return retval;
2493 curr_req->req.actual = actual;
2495 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2496 return 0;
2500 /* complete DATA or STATUS phase of ep0; prime status phase if needed */
2501 static void ep0_req_complete(struct langwell_udc *dev,
2502 struct langwell_ep *ep0, struct langwell_request *req)
2504 u32 new_addr;
2505 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2507 if (dev->usb_state == USB_STATE_ADDRESS) {
2508 /* set the new address */
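/*
 * Note: the SET_ADDRESS handler only recorded dev_addr and queued the IN
 * status stage; the DEVICEADDR register is written here, once that status
 * stage has completed, because the device must still answer the status
 * stage at its old address.
 */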
2509 new_addr = (u32)dev->dev_addr;
2510 writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
2512 new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
2513 dev_vdbg(&dev->pdev->dev, "new_addr = %d\n", new_addr);
2516 done(ep0, req, 0);
2518 switch (dev->ep0_state) {
2519 case DATA_STATE_XMIT:
2520 /* receive status phase */
2521 if (prime_status_phase(dev, EP_DIR_OUT))
2522 ep0_stall(dev);
2523 break;
2524 case DATA_STATE_RECV:
2525 /* send status phase */
2526 if (prime_status_phase(dev, EP_DIR_IN))
2527 ep0_stall(dev);
2528 break;
2529 case WAIT_FOR_OUT_STATUS:
2530 dev->ep0_state = WAIT_FOR_SETUP;
2531 break;
2532 case WAIT_FOR_SETUP:
2533 dev_err(&dev->pdev->dev, "unexpected ep0 packet\n");
2534 break;
2535 default:
2536 ep0_stall(dev);
2537 break;
2540 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2544 /* USB transfer completion interrupt */
2545 static void handle_trans_complete(struct langwell_udc *dev)
2547 u32 complete_bits;
2548 int i, ep_num, dir, bit_mask, status;
2549 struct langwell_ep *epn;
2550 struct langwell_request *curr_req, *temp_req;
2552 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2554 complete_bits = readl(&dev->op_regs->endptcomplete);
2555 dev_vdbg(&dev->pdev->dev, "endptcomplete register: 0x%08x\n",
2556 complete_bits);
2558 /* Write-Clear the bits in endptcomplete register */
2559 writel(complete_bits, &dev->op_regs->endptcomplete);
2561 if (!complete_bits) {
2562 dev_dbg(&dev->pdev->dev, "complete_bits = 0\n");
2563 goto done;
2566 for (i = 0; i < dev->ep_max; i++) {
2567 ep_num = i / 2;
2568 dir = i % 2;
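/*
 * Note: in ENDPTCOMPLETE the OUT (RX) completions occupy the low 16 bits
 * and the IN (TX) completions the high 16 bits, hence the "+ 16 * dir"
 * below.  Indices 0 and 1 are ep0 OUT and ep0 IN, which share the single
 * bidirectional dev->ep[0].
 */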
2570 bit_mask = 1 << (ep_num + 16 * dir);
2572 if (!(complete_bits & bit_mask))
2573 continue;
2575 /* ep0 */
2576 if (i == 1)
2577 epn = &dev->ep[0];
2578 else
2579 epn = &dev->ep[i];
2581 if (epn->name == NULL) {
2582 dev_warn(&dev->pdev->dev, "invalid endpoint\n");
2583 continue;
2586 if (i < 2)
2587 /* ep0 in and out */
2588 dev_dbg(&dev->pdev->dev, "%s-%s transfer completed\n",
2589 epn->name,
2590 is_in(epn) ? "in" : "out");
2591 else
2592 dev_dbg(&dev->pdev->dev, "%s transfer completed\n",
2593 epn->name);
2595 /* process the request queue until the first incomplete request */
2596 list_for_each_entry_safe(curr_req, temp_req,
2597 &epn->queue, queue) {
2598 status = process_ep_req(dev, i, curr_req);
2599 dev_vdbg(&dev->pdev->dev, "%s req status: %d\n",
2600 epn->name, status);
2602 if (status)
2603 break;
2605 /* write back status to req */
2606 curr_req->req.status = status;
2608 /* ep0 request completion */
2609 if (ep_num == 0) {
2610 ep0_req_complete(dev, epn, curr_req);
2611 break;
2612 } else {
2613 done(epn, curr_req, status);
2617 done:
2618 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2621 /* port change detect interrupt handler */
2622 static void handle_port_change(struct langwell_udc *dev)
2624 u32 portsc1, devlc;
2626 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2628 if (dev->bus_reset)
2629 dev->bus_reset = 0;
2631 portsc1 = readl(&dev->op_regs->portsc1);
2632 devlc = readl(&dev->op_regs->devlc);
2633 dev_vdbg(&dev->pdev->dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
2634 portsc1, devlc);
2636 /* bus reset is finished */
2637 if (!(portsc1 & PORTS_PR)) {
2638 /* get the speed */
2639 dev->gadget.speed = lpm_device_speed(devlc);
2640 dev_vdbg(&dev->pdev->dev, "dev->gadget.speed = %d\n",
2641 dev->gadget.speed);
2644 /* LPM L0 to L1 */
2645 if (dev->lpm && dev->lpm_state == LPM_L0)
2646 if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
2647 dev_info(&dev->pdev->dev, "LPM L0 to L1\n");
2648 dev->lpm_state = LPM_L1;
2651 /* LPM L1 to L0, force resume or remote wakeup finished */
2652 if (dev->lpm && dev->lpm_state == LPM_L1)
2653 if (!(portsc1 & PORTS_SUSP)) {
2654 dev_info(&dev->pdev->dev, "LPM L1 to L0\n");
2655 dev->lpm_state = LPM_L0;
2658 /* update USB state */
2659 if (!dev->resume_state)
2660 dev->usb_state = USB_STATE_DEFAULT;
2662 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2666 /* USB reset interrupt handler */
2667 static void handle_usb_reset(struct langwell_udc *dev)
2669 u32 deviceaddr,
2670 endptsetupstat,
2671 endptcomplete;
2672 unsigned long timeout;
2674 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2676 /* Write-Clear the device address */
2677 deviceaddr = readl(&dev->op_regs->deviceaddr);
2678 writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);
2680 dev->dev_addr = 0;
2682 /* clear usb state */
2683 dev->resume_state = 0;
2685 /* LPM L1 to L0, reset */
2686 if (dev->lpm)
2687 dev->lpm_state = LPM_L0;
2689 dev->ep0_dir = USB_DIR_OUT;
2690 dev->ep0_state = WAIT_FOR_SETUP;
2692 /* remote wakeup reset to 0 when the device is reset */
2693 dev->remote_wakeup = 0;
2694 dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
2695 dev->gadget.b_hnp_enable = 0;
2696 dev->gadget.a_hnp_support = 0;
2697 dev->gadget.a_alt_hnp_support = 0;
2699 /* Write-Clear all the setup token semaphores */
2700 endptsetupstat = readl(&dev->op_regs->endptsetupstat);
2701 writel(endptsetupstat, &dev->op_regs->endptsetupstat);
2703 /* Write-Clear all the endpoint complete status bits */
2704 endptcomplete = readl(&dev->op_regs->endptcomplete);
2705 writel(endptcomplete, &dev->op_regs->endptcomplete);
2707 /* wait until all endptprime bits cleared */
2708 timeout = jiffies + PRIME_TIMEOUT;
2709 while (readl(&dev->op_regs->endptprime)) {
2710 if (time_after(jiffies, timeout)) {
2711 dev_err(&dev->pdev->dev, "USB reset timeout\n");
2712 break;
2714 cpu_relax();
2717 /* write 1s to endptflush register to clear any primed buffers */
2718 writel((u32) ~0, &dev->op_regs->endptflush);
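/*
 * Note: if PORTSC1 still shows PR, the bus reset is in progress and it is
 * enough to stop activity and wait for the port-change interrupt;
 * otherwise the reset has already completed (or this was a
 * controller-level reset), so the controller, ep0 and run state are
 * re-initialized from scratch.
 */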
2720 if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
2721 dev_vdbg(&dev->pdev->dev, "USB bus reset\n");
2722 /* bus is resetting */
2723 dev->bus_reset = 1;
2725 /* reset all the queues, stop all USB activities */
2726 stop_activity(dev);
2727 dev->usb_state = USB_STATE_DEFAULT;
2728 } else {
2729 dev_vdbg(&dev->pdev->dev, "device controller reset\n");
2730 /* controller reset */
2731 langwell_udc_reset(dev);
2733 /* reset all the queues, stop all USB activities */
2734 stop_activity(dev);
2736 /* reset ep0 dQH and endptctrl */
2737 ep0_reset(dev);
2739 /* enable interrupt and set controller to run state */
2740 langwell_udc_start(dev);
2742 dev->usb_state = USB_STATE_ATTACHED;
2745 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2749 /* USB bus suspend/resume interrupt */
2750 static void handle_bus_suspend(struct langwell_udc *dev)
2752 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2754 dev->resume_state = dev->usb_state;
2755 dev->usb_state = USB_STATE_SUSPENDED;
2757 /* report suspend to the driver */
2758 if (dev->driver) {
2759 if (dev->driver->suspend) {
2760 spin_unlock(&dev->lock);
2761 dev->driver->suspend(&dev->gadget);
2762 spin_lock(&dev->lock);
2763 dev_dbg(&dev->pdev->dev, "suspend %s\n",
2764 dev->driver->driver.name);
2768 /* enter PHY low power suspend */
2769 if (dev->pdev->device != 0x0829)
2770 langwell_phy_low_power(dev, 1);
2772 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2776 static void handle_bus_resume(struct langwell_udc *dev)
2778 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2780 dev->usb_state = dev->resume_state;
2781 dev->resume_state = 0;
2783 /* exit PHY low power suspend */
2784 if (dev->pdev->device != 0x0829)
2785 langwell_phy_low_power(dev, 0);
2787 /* report resume to the driver */
2788 if (dev->driver) {
2789 if (dev->driver->resume) {
2790 spin_unlock(&dev->lock);
2791 dev->driver->resume(&dev->gadget);
2792 spin_lock(&dev->lock);
2793 dev_dbg(&dev->pdev->dev, "resume %s\n",
2794 dev->driver->driver.name);
2798 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2802 /* USB device controller interrupt handler */
2803 static irqreturn_t langwell_irq(int irq, void *_dev)
2805 struct langwell_udc *dev = _dev;
2806 u32 usbsts,
2807 usbintr,
2808 irq_sts,
2809 portsc1;
2811 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2813 if (dev->stopped) {
2814 dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2815 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2816 return IRQ_NONE;
2819 spin_lock(&dev->lock);
2821 /* USB status */
2822 usbsts = readl(&dev->op_regs->usbsts);
2824 /* USB interrupt enable */
2825 usbintr = readl(&dev->op_regs->usbintr);
2827 irq_sts = usbsts & usbintr;
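/*
 * Note: usbsts holds the raw status bits and usbintr the enabled
 * interrupt mask; only their intersection is handled.  The IRQ line is
 * shared (IRQF_SHARED), so IRQ_NONE is returned when nothing relevant is
 * pending, letting other handlers on the line run.
 */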
2828 dev_vdbg(&dev->pdev->dev,
2829 "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
2830 usbsts, usbintr, irq_sts);
2832 if (!irq_sts) {
2833 dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2834 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2835 spin_unlock(&dev->lock);
2836 return IRQ_NONE;
2839 /* Write-Clear interrupt status bits */
2840 writel(irq_sts, &dev->op_regs->usbsts);
2842 /* resume from suspend */
2843 portsc1 = readl(&dev->op_regs->portsc1);
2844 if (dev->usb_state == USB_STATE_SUSPENDED)
2845 if (!(portsc1 & PORTS_SUSP))
2846 handle_bus_resume(dev);
2848 /* USB interrupt */
2849 if (irq_sts & STS_UI) {
2850 dev_vdbg(&dev->pdev->dev, "USB interrupt\n");
2852 /* setup packet received from ep0 */
2853 if (readl(&dev->op_regs->endptsetupstat)
2854 & EP0SETUPSTAT_MASK) {
2855 dev_vdbg(&dev->pdev->dev,
2856 "USB SETUP packet received interrupt\n");
2857 /* setup tripwire semaphore */
2858 setup_tripwire(dev);
2859 handle_setup_packet(dev, &dev->local_setup_buff);
2862 /* USB transfer completion */
2863 if (readl(&dev->op_regs->endptcomplete)) {
2864 dev_vdbg(&dev->pdev->dev,
2865 "USB transfer completion interrupt\n");
2866 handle_trans_complete(dev);
2870 /* SOF received interrupt (for ISO transfer) */
2871 if (irq_sts & STS_SRI) {
2872 /* FIXME */
2873 /* dev_vdbg(&dev->pdev->dev, "SOF received interrupt\n"); */
2876 /* port change detect interrupt */
2877 if (irq_sts & STS_PCI) {
2878 dev_vdbg(&dev->pdev->dev, "port change detect interrupt\n");
2879 handle_port_change(dev);
2882 /* suspend interrupt */
2883 if (irq_sts & STS_SLI) {
2884 dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");
2885 handle_bus_suspend(dev);
2888 /* USB reset interrupt */
2889 if (irq_sts & STS_URI) {
2890 dev_vdbg(&dev->pdev->dev, "USB reset interrupt\n");
2891 handle_usb_reset(dev);
2894 /* USB error or system error interrupt */
2895 if (irq_sts & (STS_UEI | STS_SEI)) {
2896 /* FIXME */
2897 dev_warn(&dev->pdev->dev, "error IRQ, irq_sts: %x\n", irq_sts);
2900 spin_unlock(&dev->lock);
2902 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2903 return IRQ_HANDLED;
2907 /*-------------------------------------------------------------------------*/
2909 /* release device structure */
2910 static void gadget_release(struct device *_dev)
2912 struct langwell_udc *dev = dev_get_drvdata(_dev);
2914 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2916 complete(dev->done);
2918 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2919 kfree(dev);
2923 /* enable SRAM caching if SRAM detected */
2924 static void sram_init(struct langwell_udc *dev)
2926 struct pci_dev *pdev = dev->pdev;
2928 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2930 dev->sram_addr = pci_resource_start(pdev, 1);
2931 dev->sram_size = pci_resource_len(pdev, 1);
2932 dev_info(&dev->pdev->dev, "Found private SRAM at %x size:%x\n",
2933 dev->sram_addr, dev->sram_size);
2934 dev->got_sram = 1;
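/*
 * Note: BAR 1 exposes the on-chip SRAM.  Declaring it with
 * dma_declare_coherent_memory(DMA_MEMORY_MAP) registers it as this
 * device's coherent-memory region, so coherent DMA / dma_pool allocations
 * for the device can be served from the SRAM instead of system memory.
 */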
2936 if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
2937 dev_warn(&dev->pdev->dev, "SRAM request failed\n");
2938 dev->got_sram = 0;
2939 } else if (!dma_declare_coherent_memory(&pdev->dev, dev->sram_addr,
2940 dev->sram_addr, dev->sram_size, DMA_MEMORY_MAP)) {
2941 dev_warn(&dev->pdev->dev, "SRAM DMA declare failed\n");
2942 pci_release_region(pdev, 1);
2943 dev->got_sram = 0;
2946 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2950 /* release SRAM caching */
2951 static void sram_deinit(struct langwell_udc *dev)
2953 struct pci_dev *pdev = dev->pdev;
2955 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2957 dma_release_declared_memory(&pdev->dev);
2958 pci_release_region(pdev, 1);
2960 dev->got_sram = 0;
2962 dev_info(&dev->pdev->dev, "release SRAM caching\n");
2963 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2967 /* tear down the binding between this driver and the pci device */
2968 static void langwell_udc_remove(struct pci_dev *pdev)
2970 struct langwell_udc *dev = pci_get_drvdata(pdev);
2972 DECLARE_COMPLETION(done);
2974 BUG_ON(dev->driver);
2975 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2977 dev->done = &done;
2979 /* free dTD dma_pool and dQH */
2980 if (dev->dtd_pool)
2981 dma_pool_destroy(dev->dtd_pool);
2983 if (dev->ep_dqh)
2984 dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
2985 dev->ep_dqh, dev->ep_dqh_dma);
2987 /* release SRAM caching */
2988 if (dev->has_sram && dev->got_sram)
2989 sram_deinit(dev);
2991 if (dev->status_req) {
2992 kfree(dev->status_req->req.buf);
2993 kfree(dev->status_req);
2996 kfree(dev->ep);
2998 /* disable IRQ handler */
2999 if (dev->got_irq)
3000 free_irq(pdev->irq, dev);
3002 if (dev->cap_regs)
3003 iounmap(dev->cap_regs);
3005 if (dev->region)
3006 release_mem_region(pci_resource_start(pdev, 0),
3007 pci_resource_len(pdev, 0));
3009 if (dev->enabled)
3010 pci_disable_device(pdev);
3012 dev->cap_regs = NULL;
3014 dev_info(&dev->pdev->dev, "unbind\n");
3015 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3017 device_unregister(&dev->gadget.dev);
3018 device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3019 device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
3021 pci_set_drvdata(pdev, NULL);
3023 /* free dev, wait for the release() finished */
3024 wait_for_completion(&done);
3029 * wrap this driver around the specified device, but
3030 * don't respond over USB until a gadget driver binds to us.
3032 static int langwell_udc_probe(struct pci_dev *pdev,
3033 const struct pci_device_id *id)
3035 struct langwell_udc *dev;
3036 unsigned long resource, len;
3037 void __iomem *base = NULL;
3038 size_t size;
3039 int retval;
3041 /* alloc, and start init */
3042 dev = kzalloc(sizeof *dev, GFP_KERNEL);
3043 if (dev == NULL) {
3044 retval = -ENOMEM;
3045 goto error;
3048 /* initialize device spinlock */
3049 spin_lock_init(&dev->lock);
3051 dev->pdev = pdev;
3052 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3054 pci_set_drvdata(pdev, dev);
3056 /* now all the pci goodies ... */
3057 if (pci_enable_device(pdev) < 0) {
3058 retval = -ENODEV;
3059 goto error;
3061 dev->enabled = 1;
3063 /* control register: BAR 0 */
3064 resource = pci_resource_start(pdev, 0);
3065 len = pci_resource_len(pdev, 0);
3066 if (!request_mem_region(resource, len, driver_name)) {
3067 dev_err(&dev->pdev->dev, "controller already in use\n");
3068 retval = -EBUSY;
3069 goto error;
3071 dev->region = 1;
3073 base = ioremap_nocache(resource, len);
3074 if (base == NULL) {
3075 dev_err(&dev->pdev->dev, "can't map memory\n");
3076 retval = -EFAULT;
3077 goto error;
3080 dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
3081 dev_vdbg(&dev->pdev->dev, "dev->cap_regs: %p\n", dev->cap_regs);
3082 dev->op_regs = (struct langwell_op_regs __iomem *)
3083 (base + OP_REG_OFFSET);
3084 dev_vdbg(&dev->pdev->dev, "dev->op_regs: %p\n", dev->op_regs);
3086 /* irq setup after old hardware is cleaned up */
3087 if (!pdev->irq) {
3088 dev_err(&dev->pdev->dev, "No IRQ. Check PCI setup!\n");
3089 retval = -ENODEV;
3090 goto error;
3093 dev->has_sram = 1;
3094 dev->got_sram = 0;
3095 dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
3097 /* enable SRAM caching if detected */
3098 if (dev->has_sram && !dev->got_sram)
3099 sram_init(dev);
3101 dev_info(&dev->pdev->dev,
3102 "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
3103 pdev->irq, resource, len, base);
3104 /* enable bus mastering for the device */
3105 pci_set_master(pdev);
3107 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3108 driver_name, dev) != 0) {
3109 dev_err(&dev->pdev->dev,
3110 "request interrupt %d failed\n", pdev->irq);
3111 retval = -EBUSY;
3112 goto error;
3114 dev->got_irq = 1;
3116 /* set stopped bit */
3117 dev->stopped = 1;
3119 /* capabilities and endpoint number */
3120 dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
3121 dev->dciversion = readw(&dev->cap_regs->dciversion);
3122 dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
3123 dev_vdbg(&dev->pdev->dev, "dev->lpm: %d\n", dev->lpm);
3124 dev_vdbg(&dev->pdev->dev, "dev->dciversion: 0x%04x\n",
3125 dev->dciversion);
3126 dev_vdbg(&dev->pdev->dev, "dccparams: 0x%08x\n",
3127 readl(&dev->cap_regs->dccparams));
3128 dev_vdbg(&dev->pdev->dev, "dev->devcap: %d\n", dev->devcap);
3129 if (!dev->devcap) {
3130 dev_err(&dev->pdev->dev, "can't support device mode\n");
3131 retval = -ENODEV;
3132 goto error;
3135 /* a pair of endpoints (out/in) for each address */
3136 dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
3137 dev_vdbg(&dev->pdev->dev, "dev->ep_max: %d\n", dev->ep_max);
3139 /* allocate endpoints memory */
3140 dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
3141 GFP_KERNEL);
3142 if (!dev->ep) {
3143 dev_err(&dev->pdev->dev, "allocate endpoints memory failed\n");
3144 retval = -ENOMEM;
3145 goto error;
3148 /* allocate device dQH memory */
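/*
 * Note: the dQH list base written to ENDPOINTLISTADDR must be aligned to
 * DQH_ALIGNMENT (presumably 2 KB on this controller family), so the
 * allocation size is padded up to at least one alignment unit and to a
 * whole multiple of DQH_ALIGNMENT before calling dma_alloc_coherent().
 */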
3149 size = dev->ep_max * sizeof(struct langwell_dqh);
3150 dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
3151 if (size < DQH_ALIGNMENT)
3152 size = DQH_ALIGNMENT;
3153 else if ((size % DQH_ALIGNMENT) != 0) {
3154 size += DQH_ALIGNMENT + 1;
3155 size &= ~(DQH_ALIGNMENT - 1);
3157 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3158 &dev->ep_dqh_dma, GFP_KERNEL);
3159 if (!dev->ep_dqh) {
3160 dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3161 retval = -ENOMEM;
3162 goto error;
3164 dev->ep_dqh_size = size;
3165 dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
3167 /* initialize ep0 status request structure */
3168 dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
3169 if (!dev->status_req) {
3170 dev_err(&dev->pdev->dev,
3171 "allocate status_req memory failed\n");
3172 retval = -ENOMEM;
3173 goto error;
3175 INIT_LIST_HEAD(&dev->status_req->queue);
3177 /* allocate a small buffer so the status request has a valid (DMA-able) address */
3178 dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
3179 dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
3181 dev->resume_state = USB_STATE_NOTATTACHED;
3182 dev->usb_state = USB_STATE_POWERED;
3183 dev->ep0_dir = USB_DIR_OUT;
3185 /* remote wakeup reset to 0 when the device is reset */
3186 dev->remote_wakeup = 0;
3187 dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
3189 /* reset device controller */
3190 langwell_udc_reset(dev);
3192 /* initialize gadget structure */
3193 dev->gadget.ops = &langwell_ops; /* usb_gadget_ops */
3194 dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
3195 INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
3196 dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
3197 dev->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
3199 /* the "gadget" abstracts/virtualizes the controller */
3200 dev_set_name(&dev->gadget.dev, "gadget");
3201 dev->gadget.dev.parent = &pdev->dev;
3202 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3203 dev->gadget.dev.release = gadget_release;
3204 dev->gadget.name = driver_name; /* gadget name */
3206 /* controller endpoints reinit */
3207 eps_reinit(dev);
3209 /* reset ep0 dQH and endptctrl */
3210 ep0_reset(dev);
3212 /* create dTD dma_pool resource */
3213 dev->dtd_pool = dma_pool_create("langwell_dtd",
3214 &dev->pdev->dev,
3215 sizeof(struct langwell_dtd),
3216 DTD_ALIGNMENT,
3217 DMA_BOUNDARY);
3219 if (!dev->dtd_pool) {
3220 retval = -ENOMEM;
3221 goto error;
3224 /* done */
3225 dev_info(&dev->pdev->dev, "%s\n", driver_desc);
3226 dev_info(&dev->pdev->dev, "irq %d, pci mem %p\n", pdev->irq, base);
3227 dev_info(&dev->pdev->dev, "Driver version: " DRIVER_VERSION "\n");
3228 dev_info(&dev->pdev->dev, "Support (max) %d endpoints\n", dev->ep_max);
3229 dev_info(&dev->pdev->dev, "Device interface version: 0x%04x\n",
3230 dev->dciversion);
3231 dev_info(&dev->pdev->dev, "Controller mode: %s\n",
3232 dev->devcap ? "Device" : "Host");
3233 dev_info(&dev->pdev->dev, "Support USB LPM: %s\n",
3234 dev->lpm ? "Yes" : "No");
3236 dev_vdbg(&dev->pdev->dev,
3237 "After langwell_udc_probe(), print all registers:\n");
3238 print_all_registers(dev);
3240 retval = device_register(&dev->gadget.dev);
3241 if (retval)
3242 goto error;
3244 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3245 if (retval)
3246 goto error;
3248 retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
3249 if (retval)
3250 goto error;
3252 retval = device_create_file(&pdev->dev, &dev_attr_remote_wakeup);
3253 if (retval)
3254 goto error_attr1;
3256 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3257 return 0;
3259 error_attr1:
3260 device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3261 error:
3262 if (dev) {
3263 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3264 langwell_udc_remove(pdev);
3267 return retval;
3271 /* device controller suspend */
3272 static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3274 struct langwell_udc *dev = pci_get_drvdata(pdev);
3276 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3278 usb_del_gadget_udc(&dev->gadget);
3279 /* disable interrupt and set controller to stop state */
3280 langwell_udc_stop(dev);
3282 /* disable IRQ handler */
3283 if (dev->got_irq)
3284 free_irq(pdev->irq, dev);
3285 dev->got_irq = 0;
3287 /* save PCI state */
3288 pci_save_state(pdev);
3290 spin_lock_irq(&dev->lock);
3291 /* stop all usb activities */
3292 stop_activity(dev);
3293 spin_unlock_irq(&dev->lock);
3295 /* free dTD dma_pool and dQH */
3296 if (dev->dtd_pool)
3297 dma_pool_destroy(dev->dtd_pool);
3299 if (dev->ep_dqh)
3300 dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
3301 dev->ep_dqh, dev->ep_dqh_dma);
3303 /* release SRAM caching */
3304 if (dev->has_sram && dev->got_sram)
3305 sram_deinit(dev);
3307 /* set device power state */
3308 pci_set_power_state(pdev, PCI_D3hot);
3310 /* enter PHY low power suspend */
3311 if (dev->pdev->device != 0x0829)
3312 langwell_phy_low_power(dev, 1);
3314 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3315 return 0;
3319 /* device controller resume */
3320 static int langwell_udc_resume(struct pci_dev *pdev)
3322 struct langwell_udc *dev = pci_get_drvdata(pdev);
3323 size_t size;
3325 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3327 /* exit PHY low power suspend */
3328 if (dev->pdev->device != 0x0829)
3329 langwell_phy_low_power(dev, 0);
3331 /* set device D0 power state */
3332 pci_set_power_state(pdev, PCI_D0);
3334 /* enable SRAM caching if detected */
3335 if (dev->has_sram && !dev->got_sram)
3336 sram_init(dev);
3338 /* allocate device dQH memory */
3339 size = dev->ep_max * sizeof(struct langwell_dqh);
3340 dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
3341 if (size < DQH_ALIGNMENT)
3342 size = DQH_ALIGNMENT;
3343 else if ((size % DQH_ALIGNMENT) != 0) {
3344 size += DQH_ALIGNMENT + 1;
3345 size &= ~(DQH_ALIGNMENT - 1);
3347 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3348 &dev->ep_dqh_dma, GFP_KERNEL);
3349 if (!dev->ep_dqh) {
3350 dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3351 return -ENOMEM;
3353 dev->ep_dqh_size = size;
3354 dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
3356 /* create dTD dma_pool resource */
3357 dev->dtd_pool = dma_pool_create("langwell_dtd",
3358 &dev->pdev->dev,
3359 sizeof(struct langwell_dtd),
3360 DTD_ALIGNMENT,
3361 DMA_BOUNDARY);
3363 if (!dev->dtd_pool)
3364 return -ENOMEM;
3366 /* restore PCI state */
3367 pci_restore_state(pdev);
3369 /* enable IRQ handler */
3370 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3371 driver_name, dev) != 0) {
3372 dev_err(&dev->pdev->dev, "request interrupt %d failed\n",
3373 pdev->irq);
3374 return -EBUSY;
3376 dev->got_irq = 1;
3378 /* reset and start controller to run state */
3379 if (dev->stopped) {
3380 /* reset device controller */
3381 langwell_udc_reset(dev);
3383 /* reset ep0 dQH and endptctrl */
3384 ep0_reset(dev);
3386 /* start device if gadget is loaded */
3387 if (dev->driver)
3388 langwell_udc_start(dev);
3391 /* reset USB status */
3392 dev->usb_state = USB_STATE_ATTACHED;
3393 dev->ep0_state = WAIT_FOR_SETUP;
3394 dev->ep0_dir = USB_DIR_OUT;
3396 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3397 return 0;
3401 /* pci driver shutdown */
3402 static void langwell_udc_shutdown(struct pci_dev *pdev)
3404 struct langwell_udc *dev = pci_get_drvdata(pdev);
3405 u32 usbmode;
3407 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3409 /* reset controller mode to IDLE */
3410 usbmode = readl(&dev->op_regs->usbmode);
3411 dev_dbg(&dev->pdev->dev, "usbmode = 0x%08x\n", usbmode);
3412 usbmode &= (~3 | MODE_IDLE);
3413 writel(usbmode, &dev->op_regs->usbmode);
3415 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3418 /*-------------------------------------------------------------------------*/
3420 static const struct pci_device_id pci_ids[] = { {
3421 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3422 .class_mask = ~0,
3423 .vendor = 0x8086,
3424 .device = 0x0811,
3425 .subvendor = PCI_ANY_ID,
3426 .subdevice = PCI_ANY_ID,
3427 }, { /* end: all zeroes */ }
3430 MODULE_DEVICE_TABLE(pci, pci_ids);
3433 static struct pci_driver langwell_pci_driver = {
3434 .name = (char *) driver_name,
3435 .id_table = pci_ids,
3437 .probe = langwell_udc_probe,
3438 .remove = langwell_udc_remove,
3440 /* device controller suspend/resume */
3441 .suspend = langwell_udc_suspend,
3442 .resume = langwell_udc_resume,
3444 .shutdown = langwell_udc_shutdown,
3448 static int __init init(void)
3450 return pci_register_driver(&langwell_pci_driver);
3452 module_init(init);
3455 static void __exit cleanup(void)
3457 pci_unregister_driver(&langwell_pci_driver);
3459 module_exit(cleanup);
3462 MODULE_DESCRIPTION(DRIVER_DESC);
3463 MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
3464 MODULE_VERSION(DRIVER_VERSION);
3465 MODULE_LICENSE("GPL");