drivers/usb/gadget/langwell_udc.c
1 /*
2 * Intel Langwell USB Device Controller driver
3 * Copyright (C) 2008-2009, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 */
11 /* #undef DEBUG */
12 /* #undef VERBOSE_DEBUG */
14 #if defined(CONFIG_USB_LANGWELL_OTG)
15 #define OTG_TRANSCEIVER
16 #endif
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/kernel.h>
23 #include <linux/delay.h>
24 #include <linux/ioport.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/init.h>
29 #include <linux/timer.h>
30 #include <linux/list.h>
31 #include <linux/interrupt.h>
32 #include <linux/moduleparam.h>
33 #include <linux/device.h>
34 #include <linux/usb/ch9.h>
35 #include <linux/usb/gadget.h>
36 #include <linux/usb/otg.h>
37 #include <linux/pm.h>
38 #include <linux/io.h>
39 #include <linux/irq.h>
40 #include <asm/system.h>
41 #include <asm/unaligned.h>
43 #include "langwell_udc.h"
46 #define DRIVER_DESC "Intel Langwell USB Device Controller driver"
47 #define DRIVER_VERSION "16 May 2009"
49 static const char driver_name[] = "langwell_udc";
50 static const char driver_desc[] = DRIVER_DESC;
53 /* for endpoint 0 operations */
54 static const struct usb_endpoint_descriptor
55 langwell_ep0_desc = {
56 .bLength = USB_DT_ENDPOINT_SIZE,
57 .bDescriptorType = USB_DT_ENDPOINT,
58 .bEndpointAddress = 0,
59 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
60 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
64 /*-------------------------------------------------------------------------*/
65 /* debugging */
67 #ifdef VERBOSE_DEBUG
68 static inline void print_all_registers(struct langwell_udc *dev)
70 int i;
72 /* Capability Registers */
73 dev_dbg(&dev->pdev->dev,
74 "Capability Registers (offset: 0x%04x, length: 0x%08x)\n",
75 CAP_REG_OFFSET, (u32)sizeof(struct langwell_cap_regs));
76 dev_dbg(&dev->pdev->dev, "caplength=0x%02x\n",
77 readb(&dev->cap_regs->caplength));
78 dev_dbg(&dev->pdev->dev, "hciversion=0x%04x\n",
79 readw(&dev->cap_regs->hciversion));
80 dev_dbg(&dev->pdev->dev, "hcsparams=0x%08x\n",
81 readl(&dev->cap_regs->hcsparams));
82 dev_dbg(&dev->pdev->dev, "hccparams=0x%08x\n",
83 readl(&dev->cap_regs->hccparams));
84 dev_dbg(&dev->pdev->dev, "dciversion=0x%04x\n",
85 readw(&dev->cap_regs->dciversion));
86 dev_dbg(&dev->pdev->dev, "dccparams=0x%08x\n",
87 readl(&dev->cap_regs->dccparams));
89 /* Operational Registers */
90 dev_dbg(&dev->pdev->dev,
91 "Operational Registers (offset: 0x%04x, length: 0x%08x)\n",
92 OP_REG_OFFSET, (u32)sizeof(struct langwell_op_regs));
93 dev_dbg(&dev->pdev->dev, "extsts=0x%08x\n",
94 readl(&dev->op_regs->extsts));
95 dev_dbg(&dev->pdev->dev, "extintr=0x%08x\n",
96 readl(&dev->op_regs->extintr));
97 dev_dbg(&dev->pdev->dev, "usbcmd=0x%08x\n",
98 readl(&dev->op_regs->usbcmd));
99 dev_dbg(&dev->pdev->dev, "usbsts=0x%08x\n",
100 readl(&dev->op_regs->usbsts));
101 dev_dbg(&dev->pdev->dev, "usbintr=0x%08x\n",
102 readl(&dev->op_regs->usbintr));
103 dev_dbg(&dev->pdev->dev, "frindex=0x%08x\n",
104 readl(&dev->op_regs->frindex));
105 dev_dbg(&dev->pdev->dev, "ctrldssegment=0x%08x\n",
106 readl(&dev->op_regs->ctrldssegment));
107 dev_dbg(&dev->pdev->dev, "deviceaddr=0x%08x\n",
108 readl(&dev->op_regs->deviceaddr));
109 dev_dbg(&dev->pdev->dev, "endpointlistaddr=0x%08x\n",
110 readl(&dev->op_regs->endpointlistaddr));
111 dev_dbg(&dev->pdev->dev, "ttctrl=0x%08x\n",
112 readl(&dev->op_regs->ttctrl));
113 dev_dbg(&dev->pdev->dev, "burstsize=0x%08x\n",
114 readl(&dev->op_regs->burstsize));
115 dev_dbg(&dev->pdev->dev, "txfilltuning=0x%08x\n",
116 readl(&dev->op_regs->txfilltuning));
117 dev_dbg(&dev->pdev->dev, "txttfilltuning=0x%08x\n",
118 readl(&dev->op_regs->txttfilltuning));
119 dev_dbg(&dev->pdev->dev, "ic_usb=0x%08x\n",
120 readl(&dev->op_regs->ic_usb));
121 dev_dbg(&dev->pdev->dev, "ulpi_viewport=0x%08x\n",
122 readl(&dev->op_regs->ulpi_viewport));
123 dev_dbg(&dev->pdev->dev, "configflag=0x%08x\n",
124 readl(&dev->op_regs->configflag));
125 dev_dbg(&dev->pdev->dev, "portsc1=0x%08x\n",
126 readl(&dev->op_regs->portsc1));
127 dev_dbg(&dev->pdev->dev, "devlc=0x%08x\n",
128 readl(&dev->op_regs->devlc));
129 dev_dbg(&dev->pdev->dev, "otgsc=0x%08x\n",
130 readl(&dev->op_regs->otgsc));
131 dev_dbg(&dev->pdev->dev, "usbmode=0x%08x\n",
132 readl(&dev->op_regs->usbmode));
133 dev_dbg(&dev->pdev->dev, "endptnak=0x%08x\n",
134 readl(&dev->op_regs->endptnak));
135 dev_dbg(&dev->pdev->dev, "endptnaken=0x%08x\n",
136 readl(&dev->op_regs->endptnaken));
137 dev_dbg(&dev->pdev->dev, "endptsetupstat=0x%08x\n",
138 readl(&dev->op_regs->endptsetupstat));
139 dev_dbg(&dev->pdev->dev, "endptprime=0x%08x\n",
140 readl(&dev->op_regs->endptprime));
141 dev_dbg(&dev->pdev->dev, "endptflush=0x%08x\n",
142 readl(&dev->op_regs->endptflush));
143 dev_dbg(&dev->pdev->dev, "endptstat=0x%08x\n",
144 readl(&dev->op_regs->endptstat));
145 dev_dbg(&dev->pdev->dev, "endptcomplete=0x%08x\n",
146 readl(&dev->op_regs->endptcomplete));
148 for (i = 0; i < dev->ep_max / 2; i++) {
149 dev_dbg(&dev->pdev->dev, "endptctrl[%d]=0x%08x\n",
150 i, readl(&dev->op_regs->endptctrl[i]));
153 #else
155 #define print_all_registers(dev) do { } while (0)
157 #endif /* VERBOSE_DEBUG */
160 /*-------------------------------------------------------------------------*/
162 #define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
163 USB_DIR_IN) : (usb_endpoint_dir_in((ep)->desc)))
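/*
 * ep0 serves both directions with a single descriptor, so its current
 * direction is tracked in dev->ep0_dir; other endpoints take it from desc.
 */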
165 #define DIR_STRING(ep) (is_in(ep) ? "in" : "out")
168 static char *type_string(const struct usb_endpoint_descriptor *desc)
170 switch (usb_endpoint_type(desc)) {
171 case USB_ENDPOINT_XFER_BULK:
172 return "bulk";
173 case USB_ENDPOINT_XFER_ISOC:
174 return "iso";
175 case USB_ENDPOINT_XFER_INT:
176 return "int";
179 return "control";
183 /* configure endpoint control registers */
184 static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
185 unsigned char is_in, unsigned char ep_type)
187 struct langwell_udc *dev;
188 u32 endptctrl;
190 dev = ep->dev;
191 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
193 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
194 if (is_in) { /* TX */
195 if (ep_num)
196 endptctrl |= EPCTRL_TXR;
197 endptctrl |= EPCTRL_TXE;
198 endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
199 } else { /* RX */
200 if (ep_num)
201 endptctrl |= EPCTRL_RXR;
202 endptctrl |= EPCTRL_RXE;
203 endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
206 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
208 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
212 /* reset ep0 dQH and endptctrl */
213 static void ep0_reset(struct langwell_udc *dev)
215 struct langwell_ep *ep;
216 int i;
218 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
220 /* ep0 in and out */
221 for (i = 0; i < 2; i++) {
222 ep = &dev->ep[i];
223 ep->dev = dev;
225 /* ep0 dQH */
226 ep->dqh = &dev->ep_dqh[i];
228 /* configure ep0 endpoint capabilities in dQH */
229 ep->dqh->dqh_ios = 1;
230 ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
232 /* enable ep0-in HW zero length termination select */
233 if (is_in(ep))
234 ep->dqh->dqh_zlt = 0;
235 ep->dqh->dqh_mult = 0;
237 ep->dqh->dtd_next = DTD_TERM;
239 /* configure ep0 control registers */
240 ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
243 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
247 /*-------------------------------------------------------------------------*/
249 /* endpoints operations */
251 /* configure endpoint, making it usable */
252 static int langwell_ep_enable(struct usb_ep *_ep,
253 const struct usb_endpoint_descriptor *desc)
255 struct langwell_udc *dev;
256 struct langwell_ep *ep;
257 u16 max = 0;
258 unsigned long flags;
259 int i, retval = 0;
260 unsigned char zlt, ios = 0, mult = 0;
262 ep = container_of(_ep, struct langwell_ep, ep);
263 dev = ep->dev;
264 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
266 if (!_ep || !desc || ep->desc
267 || desc->bDescriptorType != USB_DT_ENDPOINT)
268 return -EINVAL;
270 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
271 return -ESHUTDOWN;
273 max = usb_endpoint_maxp(desc);
276 * disable HW zero length termination select
277 * driver handles zero length packet through req->req.zero
279 zlt = 1;
282 * sanity check type, direction, address, and then
283 * initialize the endpoint capabilities fields in dQH
285 switch (usb_endpoint_type(desc)) {
286 case USB_ENDPOINT_XFER_CONTROL:
287 ios = 1;
288 break;
289 case USB_ENDPOINT_XFER_BULK:
290 if ((dev->gadget.speed == USB_SPEED_HIGH
291 && max != 512)
292 || (dev->gadget.speed == USB_SPEED_FULL
293 && max > 64)) {
294 goto done;
296 break;
297 case USB_ENDPOINT_XFER_INT:
298 if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
299 goto done;
301 switch (dev->gadget.speed) {
302 case USB_SPEED_HIGH:
303 if (max <= 1024)
304 break;
305 case USB_SPEED_FULL:
306 if (max <= 64)
307 break;
308 default:
309 if (max <= 8)
310 break;
311 goto done;
313 break;
314 case USB_ENDPOINT_XFER_ISOC:
315 if (strstr(ep->ep.name, "-bulk")
316 || strstr(ep->ep.name, "-int"))
317 goto done;
319 switch (dev->gadget.speed) {
320 case USB_SPEED_HIGH:
321 if (max <= 1024)
322 break;
323 case USB_SPEED_FULL:
324 if (max <= 1023)
325 break;
326 default:
327 goto done;
330 * FIXME:
331 * calculate transactions needed for high bandwidth iso
333 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
 334 		max = max & 0x7ff;	/* bits 0~10 hold the max packet size */
335 /* 3 transactions at most */
336 if (mult > 3)
337 goto done;
338 break;
339 default:
340 goto done;
343 spin_lock_irqsave(&dev->lock, flags);
345 ep->ep.maxpacket = max;
346 ep->desc = desc;
347 ep->stopped = 0;
348 ep->ep_num = usb_endpoint_num(desc);
350 /* ep_type */
351 ep->ep_type = usb_endpoint_type(desc);
353 /* configure endpoint control registers */
354 ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
356 /* configure endpoint capabilities in dQH */
357 i = ep->ep_num * 2 + is_in(ep);
358 ep->dqh = &dev->ep_dqh[i];
359 ep->dqh->dqh_ios = ios;
360 ep->dqh->dqh_mpl = cpu_to_le16(max);
361 ep->dqh->dqh_zlt = zlt;
362 ep->dqh->dqh_mult = mult;
363 ep->dqh->dtd_next = DTD_TERM;
365 dev_dbg(&dev->pdev->dev, "enabled %s (ep%d%s-%s), max %04x\n",
366 _ep->name,
367 ep->ep_num,
368 DIR_STRING(ep),
369 type_string(desc),
370 max);
372 spin_unlock_irqrestore(&dev->lock, flags);
373 done:
374 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
375 return retval;
379 /*-------------------------------------------------------------------------*/
381 /* retire a request */
382 static void done(struct langwell_ep *ep, struct langwell_request *req,
383 int status)
385 struct langwell_udc *dev = ep->dev;
386 unsigned stopped = ep->stopped;
387 struct langwell_dtd *curr_dtd, *next_dtd;
388 int i;
390 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
392 /* remove the req from ep->queue */
393 list_del_init(&req->queue);
395 if (req->req.status == -EINPROGRESS)
396 req->req.status = status;
397 else
398 status = req->req.status;
400 /* free dTD for the request */
401 next_dtd = req->head;
402 for (i = 0; i < req->dtd_count; i++) {
403 curr_dtd = next_dtd;
404 if (i != req->dtd_count - 1)
405 next_dtd = curr_dtd->next_dtd_virt;
406 dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
409 if (req->mapped) {
410 dma_unmap_single(&dev->pdev->dev,
411 req->req.dma, req->req.length,
 412 			is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
413 req->req.dma = DMA_ADDR_INVALID;
414 req->mapped = 0;
415 } else
416 dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
417 req->req.length,
418 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
420 if (status != -ESHUTDOWN)
421 dev_dbg(&dev->pdev->dev,
422 "complete %s, req %p, stat %d, len %u/%u\n",
423 ep->ep.name, &req->req, status,
424 req->req.actual, req->req.length);
426 /* don't modify queue heads during completion callback */
427 ep->stopped = 1;
429 spin_unlock(&dev->lock);
430 /* complete routine from gadget driver */
431 if (req->req.complete)
432 req->req.complete(&ep->ep, &req->req);
434 spin_lock(&dev->lock);
435 ep->stopped = stopped;
437 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
441 static void langwell_ep_fifo_flush(struct usb_ep *_ep);
443 /* delete all endpoint requests, called with spinlock held */
444 static void nuke(struct langwell_ep *ep, int status)
446 /* called with spinlock held */
447 ep->stopped = 1;
449 /* endpoint fifo flush */
450 if (&ep->ep && ep->desc)
451 langwell_ep_fifo_flush(&ep->ep);
453 while (!list_empty(&ep->queue)) {
454 struct langwell_request *req = NULL;
455 req = list_entry(ep->queue.next, struct langwell_request,
456 queue);
457 done(ep, req, status);
462 /*-------------------------------------------------------------------------*/
464 /* endpoint is no longer usable */
465 static int langwell_ep_disable(struct usb_ep *_ep)
467 struct langwell_ep *ep;
468 unsigned long flags;
469 struct langwell_udc *dev;
470 int ep_num;
471 u32 endptctrl;
473 ep = container_of(_ep, struct langwell_ep, ep);
474 dev = ep->dev;
475 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
477 if (!_ep || !ep->desc)
478 return -EINVAL;
480 spin_lock_irqsave(&dev->lock, flags);
482 /* disable endpoint control register */
483 ep_num = ep->ep_num;
484 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
485 if (is_in(ep))
486 endptctrl &= ~EPCTRL_TXE;
487 else
488 endptctrl &= ~EPCTRL_RXE;
489 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
491 /* nuke all pending requests (does flush) */
492 nuke(ep, -ESHUTDOWN);
494 ep->desc = NULL;
495 ep->stopped = 1;
497 spin_unlock_irqrestore(&dev->lock, flags);
499 dev_dbg(&dev->pdev->dev, "disabled %s\n", _ep->name);
500 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
502 return 0;
506 /* allocate a request object to use with this endpoint */
507 static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
508 gfp_t gfp_flags)
510 struct langwell_ep *ep;
511 struct langwell_udc *dev;
512 struct langwell_request *req = NULL;
514 if (!_ep)
515 return NULL;
517 ep = container_of(_ep, struct langwell_ep, ep);
518 dev = ep->dev;
519 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
521 req = kzalloc(sizeof(*req), gfp_flags);
522 if (!req)
523 return NULL;
525 req->req.dma = DMA_ADDR_INVALID;
526 INIT_LIST_HEAD(&req->queue);
528 dev_vdbg(&dev->pdev->dev, "alloc request for %s\n", _ep->name);
529 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
530 return &req->req;
534 /* free a request object */
535 static void langwell_free_request(struct usb_ep *_ep,
536 struct usb_request *_req)
538 struct langwell_ep *ep;
539 struct langwell_udc *dev;
540 struct langwell_request *req = NULL;
542 ep = container_of(_ep, struct langwell_ep, ep);
543 dev = ep->dev;
544 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
546 if (!_ep || !_req)
547 return;
549 req = container_of(_req, struct langwell_request, req);
550 WARN_ON(!list_empty(&req->queue));
552 if (_req)
553 kfree(req);
555 dev_vdbg(&dev->pdev->dev, "free request for %s\n", _ep->name);
556 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
560 /*-------------------------------------------------------------------------*/
562 /* queue dTD and PRIME endpoint */
563 static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
565 u32 bit_mask, usbcmd, endptstat, dtd_dma;
566 u8 dtd_status;
567 int i;
568 struct langwell_dqh *dqh;
569 struct langwell_udc *dev;
571 dev = ep->dev;
572 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
574 i = ep->ep_num * 2 + is_in(ep);
575 dqh = &dev->ep_dqh[i];
577 if (ep->ep_num)
578 dev_vdbg(&dev->pdev->dev, "%s\n", ep->name);
579 else
580 /* ep0 */
581 dev_vdbg(&dev->pdev->dev, "%s-%s\n", ep->name, DIR_STRING(ep));
583 dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%p\n",
584 i, &(dev->ep_dqh[i]));
586 bit_mask = is_in(ep) ?
587 (1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
589 dev_vdbg(&dev->pdev->dev, "bit_mask = 0x%08x\n", bit_mask);
591 /* check if the pipe is empty */
592 if (!(list_empty(&ep->queue))) {
593 /* add dTD to the end of linked list */
594 struct langwell_request *lastreq;
595 lastreq = list_entry(ep->queue.prev,
596 struct langwell_request, queue);
598 lastreq->tail->dtd_next =
599 cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
601 /* read prime bit, if 1 goto out */
602 if (readl(&dev->op_regs->endptprime) & bit_mask)
603 goto out;
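/*
 * ATDTW (add dTD tripwire): set the tripwire, sample endptstat, and
 * retry if hardware cleared the tripwire, so the sampled status is
 * consistent with the dTD that was just appended.
 */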
605 do {
606 /* set ATDTW bit in USBCMD */
607 usbcmd = readl(&dev->op_regs->usbcmd);
608 writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
610 /* read correct status bit */
611 endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
613 } while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
615 /* write ATDTW bit to 0 */
616 usbcmd = readl(&dev->op_regs->usbcmd);
617 writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
619 if (endptstat)
620 goto out;
623 /* write dQH next pointer and terminate bit to 0 */
624 dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
625 dqh->dtd_next = cpu_to_le32(dtd_dma);
627 /* clear active and halt bit */
628 dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
629 dqh->dtd_status &= dtd_status;
630 dev_vdbg(&dev->pdev->dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
632 /* ensure that updates to the dQH will occur before priming */
633 wmb();
635 /* write 1 to endptprime register to PRIME endpoint */
636 bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
637 dev_vdbg(&dev->pdev->dev, "endprime bit_mask = 0x%08x\n", bit_mask);
638 writel(bit_mask, &dev->op_regs->endptprime);
639 out:
640 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
641 return 0;
645 /* fill in the dTD structure to build a transfer descriptor */
646 static struct langwell_dtd *build_dtd(struct langwell_request *req,
647 unsigned *length, dma_addr_t *dma, int *is_last)
649 u32 buf_ptr;
650 struct langwell_dtd *dtd;
651 struct langwell_udc *dev;
652 int i;
654 dev = req->ep->dev;
655 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
657 /* the maximum transfer length, up to 16k bytes */
658 *length = min(req->req.length - req->req.actual,
659 (unsigned)DTD_MAX_TRANSFER_LENGTH);
661 /* create dTD dma_pool resource */
662 dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
663 if (dtd == NULL)
664 return dtd;
665 dtd->dtd_dma = *dma;
667 /* initialize buffer page pointers */
668 buf_ptr = (u32)(req->req.dma + req->req.actual);
669 for (i = 0; i < 5; i++)
670 dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
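/* five page-sized buffer pointers per dTD: enough for a 16k transfer starting at any page offset */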
672 req->req.actual += *length;
674 /* fill in total bytes with transfer size */
675 dtd->dtd_total = cpu_to_le16(*length);
676 dev_vdbg(&dev->pdev->dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
 678 	/* decide is_last, taking req->req.zero into account */
679 if (req->req.zero) {
680 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
681 *is_last = 1;
682 else
683 *is_last = 0;
684 } else if (req->req.length == req->req.actual) {
685 *is_last = 1;
686 } else
687 *is_last = 0;
689 if (*is_last == 0)
690 dev_vdbg(&dev->pdev->dev, "multi-dtd request!\n");
692 /* set interrupt on complete bit for the last dTD */
693 if (*is_last && !req->req.no_interrupt)
694 dtd->dtd_ioc = 1;
696 /* set multiplier override 0 for non-ISO and non-TX endpoint */
697 dtd->dtd_multo = 0;
699 /* set the active bit of status field to 1 */
700 dtd->dtd_status = DTD_STS_ACTIVE;
701 dev_vdbg(&dev->pdev->dev, "dtd->dtd_status = 0x%02x\n",
702 dtd->dtd_status);
704 dev_vdbg(&dev->pdev->dev, "length = %d, dma addr= 0x%08x\n",
705 *length, (int)*dma);
706 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
707 return dtd;
711 /* generate dTD linked list for a request */
712 static int req_to_dtd(struct langwell_request *req)
714 unsigned count;
715 int is_last, is_first = 1;
716 struct langwell_dtd *dtd, *last_dtd = NULL;
717 struct langwell_udc *dev;
718 dma_addr_t dma;
720 dev = req->ep->dev;
721 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
722 do {
723 dtd = build_dtd(req, &count, &dma, &is_last);
724 if (dtd == NULL)
725 return -ENOMEM;
727 if (is_first) {
728 is_first = 0;
729 req->head = dtd;
730 } else {
731 last_dtd->dtd_next = cpu_to_le32(dma);
732 last_dtd->next_dtd_virt = dtd;
734 last_dtd = dtd;
735 req->dtd_count++;
736 } while (!is_last);
738 /* set terminate bit to 1 for the last dTD */
739 dtd->dtd_next = DTD_TERM;
741 req->tail = dtd;
743 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
744 return 0;
747 /*-------------------------------------------------------------------------*/
 749 /* queue (submits) an I/O request to an endpoint */
750 static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
751 gfp_t gfp_flags)
753 struct langwell_request *req;
754 struct langwell_ep *ep;
755 struct langwell_udc *dev;
756 unsigned long flags;
757 int is_iso = 0, zlflag = 0;
759 /* always require a cpu-view buffer */
760 req = container_of(_req, struct langwell_request, req);
761 ep = container_of(_ep, struct langwell_ep, ep);
763 if (!_req || !_req->complete || !_req->buf
764 || !list_empty(&req->queue)) {
765 return -EINVAL;
768 if (unlikely(!_ep || !ep->desc))
769 return -EINVAL;
771 dev = ep->dev;
772 req->ep = ep;
773 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
775 if (usb_endpoint_xfer_isoc(ep->desc)) {
776 if (req->req.length > ep->ep.maxpacket)
777 return -EMSGSIZE;
778 is_iso = 1;
781 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
782 return -ESHUTDOWN;
784 /* set up dma mapping in case the caller didn't */
785 if (_req->dma == DMA_ADDR_INVALID) {
 786 		/* WORKAROUND: WARN_ON(size == 0) - temporarily map one byte so the zero-length case does not trigger it; length is restored below */
787 if (_req->length == 0) {
788 dev_vdbg(&dev->pdev->dev, "req->length: 0->1\n");
789 zlflag = 1;
790 _req->length++;
793 _req->dma = dma_map_single(&dev->pdev->dev,
794 _req->buf, _req->length,
795 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
796 if (zlflag && (_req->length == 1)) {
797 dev_vdbg(&dev->pdev->dev, "req->length: 1->0\n");
798 zlflag = 0;
799 _req->length = 0;
802 req->mapped = 1;
803 dev_vdbg(&dev->pdev->dev, "req->mapped = 1\n");
804 } else {
805 dma_sync_single_for_device(&dev->pdev->dev,
806 _req->dma, _req->length,
807 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
808 req->mapped = 0;
809 dev_vdbg(&dev->pdev->dev, "req->mapped = 0\n");
812 dev_dbg(&dev->pdev->dev,
813 "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
814 _ep->name,
815 _req, _req->length, _req->buf, (int)_req->dma);
817 _req->status = -EINPROGRESS;
818 _req->actual = 0;
819 req->dtd_count = 0;
821 spin_lock_irqsave(&dev->lock, flags);
823 /* build and put dTDs to endpoint queue */
824 if (!req_to_dtd(req)) {
825 queue_dtd(ep, req);
826 } else {
827 spin_unlock_irqrestore(&dev->lock, flags);
828 return -ENOMEM;
831 /* update ep0 state */
832 if (ep->ep_num == 0)
833 dev->ep0_state = DATA_STATE_XMIT;
835 if (likely(req != NULL)) {
836 list_add_tail(&req->queue, &ep->queue);
837 dev_vdbg(&dev->pdev->dev, "list_add_tail()\n");
840 spin_unlock_irqrestore(&dev->lock, flags);
842 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
843 return 0;
847 /* dequeue (cancels, unlinks) an I/O request from an endpoint */
848 static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
850 struct langwell_ep *ep;
851 struct langwell_udc *dev;
852 struct langwell_request *req;
853 unsigned long flags;
854 int stopped, ep_num, retval = 0;
855 u32 endptctrl;
857 ep = container_of(_ep, struct langwell_ep, ep);
858 dev = ep->dev;
859 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
861 if (!_ep || !ep->desc || !_req)
862 return -EINVAL;
864 if (!dev->driver)
865 return -ESHUTDOWN;
867 spin_lock_irqsave(&dev->lock, flags);
868 stopped = ep->stopped;
870 /* quiesce dma while we patch the queue */
871 ep->stopped = 1;
872 ep_num = ep->ep_num;
874 /* disable endpoint control register */
875 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
876 if (is_in(ep))
877 endptctrl &= ~EPCTRL_TXE;
878 else
879 endptctrl &= ~EPCTRL_RXE;
880 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
882 /* make sure it's still queued on this endpoint */
883 list_for_each_entry(req, &ep->queue, queue) {
884 if (&req->req == _req)
885 break;
888 if (&req->req != _req) {
889 retval = -EINVAL;
890 goto done;
893 /* queue head may be partially complete. */
894 if (ep->queue.next == &req->queue) {
895 dev_dbg(&dev->pdev->dev, "unlink (%s) dma\n", _ep->name);
896 _req->status = -ECONNRESET;
897 langwell_ep_fifo_flush(&ep->ep);
899 /* not the last request in endpoint queue */
900 if (likely(ep->queue.next == &req->queue)) {
901 struct langwell_dqh *dqh;
902 struct langwell_request *next_req;
904 dqh = ep->dqh;
905 next_req = list_entry(req->queue.next,
906 struct langwell_request, queue);
908 /* point the dQH to the first dTD of next request */
909 writel((u32) next_req->head, &dqh->dqh_current);
911 } else {
912 struct langwell_request *prev_req;
914 prev_req = list_entry(req->queue.prev,
915 struct langwell_request, queue);
916 writel(readl(&req->tail->dtd_next),
917 &prev_req->tail->dtd_next);
920 done(ep, req, -ECONNRESET);
922 done:
923 /* enable endpoint again */
924 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
925 if (is_in(ep))
926 endptctrl |= EPCTRL_TXE;
927 else
928 endptctrl |= EPCTRL_RXE;
929 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
931 ep->stopped = stopped;
932 spin_unlock_irqrestore(&dev->lock, flags);
934 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
935 return retval;
939 /*-------------------------------------------------------------------------*/
941 /* endpoint set/clear halt */
942 static void ep_set_halt(struct langwell_ep *ep, int value)
944 u32 endptctrl = 0;
945 int ep_num;
946 struct langwell_udc *dev = ep->dev;
947 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
949 ep_num = ep->ep_num;
950 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
952 /* value: 1 - set halt, 0 - clear halt */
953 if (value) {
954 /* set the stall bit */
955 if (is_in(ep))
956 endptctrl |= EPCTRL_TXS;
957 else
958 endptctrl |= EPCTRL_RXS;
959 } else {
960 /* clear the stall bit and reset data toggle */
961 if (is_in(ep)) {
962 endptctrl &= ~EPCTRL_TXS;
963 endptctrl |= EPCTRL_TXR;
964 } else {
965 endptctrl &= ~EPCTRL_RXS;
966 endptctrl |= EPCTRL_RXR;
970 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
972 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
976 /* set the endpoint halt feature */
977 static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
979 struct langwell_ep *ep;
980 struct langwell_udc *dev;
981 unsigned long flags;
982 int retval = 0;
984 ep = container_of(_ep, struct langwell_ep, ep);
985 dev = ep->dev;
987 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
989 if (!_ep || !ep->desc)
990 return -EINVAL;
992 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
993 return -ESHUTDOWN;
995 if (usb_endpoint_xfer_isoc(ep->desc))
996 return -EOPNOTSUPP;
998 spin_lock_irqsave(&dev->lock, flags);
 1001 	 * an attempt to halt an IN ep will fail if any transfer requests
 1002 	 * are still queued
1004 if (!list_empty(&ep->queue) && is_in(ep) && value) {
1005 /* IN endpoint FIFO holds bytes */
1006 dev_dbg(&dev->pdev->dev, "%s FIFO holds bytes\n", _ep->name);
1007 retval = -EAGAIN;
1008 goto done;
1011 /* endpoint set/clear halt */
1012 if (ep->ep_num) {
1013 ep_set_halt(ep, value);
1014 } else { /* endpoint 0 */
1015 dev->ep0_state = WAIT_FOR_SETUP;
1016 dev->ep0_dir = USB_DIR_OUT;
1018 done:
1019 spin_unlock_irqrestore(&dev->lock, flags);
1020 dev_dbg(&dev->pdev->dev, "%s %s halt\n",
1021 _ep->name, value ? "set" : "clear");
1022 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1023 return retval;
 1027 /* set the halt feature and ignore clear requests */
1028 static int langwell_ep_set_wedge(struct usb_ep *_ep)
1030 struct langwell_ep *ep;
1031 struct langwell_udc *dev;
1033 ep = container_of(_ep, struct langwell_ep, ep);
1034 dev = ep->dev;
1036 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1038 if (!_ep || !ep->desc)
1039 return -EINVAL;
1041 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1042 return usb_ep_set_halt(_ep);
1046 /* flush contents of a fifo */
1047 static void langwell_ep_fifo_flush(struct usb_ep *_ep)
1049 struct langwell_ep *ep;
1050 struct langwell_udc *dev;
1051 u32 flush_bit;
1052 unsigned long timeout;
1054 ep = container_of(_ep, struct langwell_ep, ep);
1055 dev = ep->dev;
1057 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1059 if (!_ep || !ep->desc) {
1060 dev_vdbg(&dev->pdev->dev, "ep or ep->desc is NULL\n");
1061 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1062 return;
1065 dev_vdbg(&dev->pdev->dev, "%s-%s fifo flush\n",
1066 _ep->name, DIR_STRING(ep));
1068 /* flush endpoint buffer */
1069 if (ep->ep_num == 0)
1070 flush_bit = (1 << 16) | 1;
1071 else if (is_in(ep))
1072 flush_bit = 1 << (ep->ep_num + 16); /* TX */
1073 else
1074 flush_bit = 1 << ep->ep_num; /* RX */
1076 /* wait until flush complete */
1077 timeout = jiffies + FLUSH_TIMEOUT;
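/* re-issue the flush if endptstat still shows the endpoint primed after the flush completed */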
1078 do {
1079 writel(flush_bit, &dev->op_regs->endptflush);
1080 while (readl(&dev->op_regs->endptflush)) {
1081 if (time_after(jiffies, timeout)) {
1082 dev_err(&dev->pdev->dev, "ep flush timeout\n");
1083 goto done;
1085 cpu_relax();
1087 } while (readl(&dev->op_regs->endptstat) & flush_bit);
1088 done:
1089 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1093 /* endpoints operations structure */
1094 static const struct usb_ep_ops langwell_ep_ops = {
1096 /* configure endpoint, making it usable */
1097 .enable = langwell_ep_enable,
1099 /* endpoint is no longer usable */
1100 .disable = langwell_ep_disable,
1102 /* allocate a request object to use with this endpoint */
1103 .alloc_request = langwell_alloc_request,
1105 /* free a request object */
1106 .free_request = langwell_free_request,
 1108 	/* queue (submits) an I/O request to an endpoint */
1109 .queue = langwell_ep_queue,
1111 /* dequeue (cancels, unlinks) an I/O request from an endpoint */
1112 .dequeue = langwell_ep_dequeue,
1114 /* set the endpoint halt feature */
1115 .set_halt = langwell_ep_set_halt,
 1117 	/* set the halt feature and ignore clear requests */
1118 .set_wedge = langwell_ep_set_wedge,
1120 /* flush contents of a fifo */
1121 .fifo_flush = langwell_ep_fifo_flush,
1125 /*-------------------------------------------------------------------------*/
1127 /* device controller usb_gadget_ops structure */
1129 /* returns the current frame number */
1130 static int langwell_get_frame(struct usb_gadget *_gadget)
1132 struct langwell_udc *dev;
1133 u16 retval;
1135 if (!_gadget)
1136 return -ENODEV;
1138 dev = container_of(_gadget, struct langwell_udc, gadget);
1139 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1141 retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
1143 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1144 return retval;
1148 /* enter or exit PHY low power state */
1149 static void langwell_phy_low_power(struct langwell_udc *dev, bool flag)
1151 u32 devlc;
1152 u8 devlc_byte2;
1153 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1155 devlc = readl(&dev->op_regs->devlc);
1156 dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1158 if (flag)
1159 devlc |= LPM_PHCD;
1160 else
1161 devlc &= ~LPM_PHCD;
1163 /* FIXME: workaround for Langwell A1/A2/A3 sighting */
1164 devlc_byte2 = (devlc >> 16) & 0xff;
1165 writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
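/* only the byte holding PHCD is written; the other devlc fields are left as read */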
1167 devlc = readl(&dev->op_regs->devlc);
1168 dev_vdbg(&dev->pdev->dev,
1169 "%s PHY low power suspend, devlc = 0x%08x\n",
1170 flag ? "enter" : "exit", devlc);
1174 /* tries to wake up the host connected to this gadget */
1175 static int langwell_wakeup(struct usb_gadget *_gadget)
1177 struct langwell_udc *dev;
1178 u32 portsc1;
1179 unsigned long flags;
1181 if (!_gadget)
1182 return 0;
1184 dev = container_of(_gadget, struct langwell_udc, gadget);
1185 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1187 /* remote wakeup feature not enabled by host */
1188 if (!dev->remote_wakeup) {
1189 dev_info(&dev->pdev->dev, "remote wakeup is disabled\n");
1190 return -ENOTSUPP;
1193 spin_lock_irqsave(&dev->lock, flags);
1195 portsc1 = readl(&dev->op_regs->portsc1);
1196 if (!(portsc1 & PORTS_SUSP)) {
1197 spin_unlock_irqrestore(&dev->lock, flags);
1198 return 0;
1201 /* LPM L1 to L0 or legacy remote wakeup */
1202 if (dev->lpm && dev->lpm_state == LPM_L1)
1203 dev_info(&dev->pdev->dev, "LPM L1 to L0 remote wakeup\n");
1204 else
1205 dev_info(&dev->pdev->dev, "device remote wakeup\n");
1207 /* exit PHY low power suspend */
1208 if (dev->pdev->device != 0x0829)
1209 langwell_phy_low_power(dev, 0);
1211 /* force port resume */
1212 portsc1 |= PORTS_FPR;
1213 writel(portsc1, &dev->op_regs->portsc1);
1215 spin_unlock_irqrestore(&dev->lock, flags);
1217 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1218 return 0;
1222 /* notify controller that VBUS is powered or not */
1223 static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
1225 struct langwell_udc *dev;
1226 unsigned long flags;
1227 u32 usbcmd;
1229 if (!_gadget)
1230 return -ENODEV;
1232 dev = container_of(_gadget, struct langwell_udc, gadget);
1233 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1235 spin_lock_irqsave(&dev->lock, flags);
1236 dev_vdbg(&dev->pdev->dev, "VBUS status: %s\n",
1237 is_active ? "on" : "off");
1239 dev->vbus_active = (is_active != 0);
1240 if (dev->driver && dev->softconnected && dev->vbus_active) {
1241 usbcmd = readl(&dev->op_regs->usbcmd);
1242 usbcmd |= CMD_RUNSTOP;
1243 writel(usbcmd, &dev->op_regs->usbcmd);
1244 } else {
1245 usbcmd = readl(&dev->op_regs->usbcmd);
1246 usbcmd &= ~CMD_RUNSTOP;
1247 writel(usbcmd, &dev->op_regs->usbcmd);
1250 spin_unlock_irqrestore(&dev->lock, flags);
1252 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1253 return 0;
1257 /* constrain controller's VBUS power usage */
1258 static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1260 struct langwell_udc *dev;
1262 if (!_gadget)
1263 return -ENODEV;
1265 dev = container_of(_gadget, struct langwell_udc, gadget);
1266 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1268 if (dev->transceiver) {
1269 dev_vdbg(&dev->pdev->dev, "otg_set_power\n");
1270 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1271 return otg_set_power(dev->transceiver, mA);
1274 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1275 return -ENOTSUPP;
1279 /* D+ pullup, software-controlled connect/disconnect to USB host */
1280 static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
1282 struct langwell_udc *dev;
1283 u32 usbcmd;
1284 unsigned long flags;
1286 if (!_gadget)
1287 return -ENODEV;
1289 dev = container_of(_gadget, struct langwell_udc, gadget);
1291 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1293 spin_lock_irqsave(&dev->lock, flags);
1294 dev->softconnected = (is_on != 0);
1296 if (dev->driver && dev->softconnected && dev->vbus_active) {
1297 usbcmd = readl(&dev->op_regs->usbcmd);
1298 usbcmd |= CMD_RUNSTOP;
1299 writel(usbcmd, &dev->op_regs->usbcmd);
1300 } else {
1301 usbcmd = readl(&dev->op_regs->usbcmd);
1302 usbcmd &= ~CMD_RUNSTOP;
1303 writel(usbcmd, &dev->op_regs->usbcmd);
1305 spin_unlock_irqrestore(&dev->lock, flags);
1307 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1308 return 0;
1311 static int langwell_start(struct usb_gadget *g,
1312 struct usb_gadget_driver *driver);
1314 static int langwell_stop(struct usb_gadget *g,
1315 struct usb_gadget_driver *driver);
1317 /* device controller usb_gadget_ops structure */
1318 static const struct usb_gadget_ops langwell_ops = {
1320 /* returns the current frame number */
1321 .get_frame = langwell_get_frame,
1323 /* tries to wake up the host connected to this gadget */
1324 .wakeup = langwell_wakeup,
1326 /* set the device selfpowered feature, always selfpowered */
1327 /* .set_selfpowered = langwell_set_selfpowered, */
1329 /* notify controller that VBUS is powered or not */
1330 .vbus_session = langwell_vbus_session,
1332 /* constrain controller's VBUS power usage */
1333 .vbus_draw = langwell_vbus_draw,
1335 /* D+ pullup, software-controlled connect/disconnect to USB host */
1336 .pullup = langwell_pullup,
1338 .udc_start = langwell_start,
1339 .udc_stop = langwell_stop,
1343 /*-------------------------------------------------------------------------*/
1345 /* device controller operations */
1347 /* reset device controller */
1348 static int langwell_udc_reset(struct langwell_udc *dev)
1350 u32 usbcmd, usbmode, devlc, endpointlistaddr;
1351 u8 devlc_byte0, devlc_byte2;
1352 unsigned long timeout;
1354 if (!dev)
1355 return -EINVAL;
1357 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1359 /* set controller to stop state */
1360 usbcmd = readl(&dev->op_regs->usbcmd);
1361 usbcmd &= ~CMD_RUNSTOP;
1362 writel(usbcmd, &dev->op_regs->usbcmd);
1364 /* reset device controller */
1365 usbcmd = readl(&dev->op_regs->usbcmd);
1366 usbcmd |= CMD_RST;
1367 writel(usbcmd, &dev->op_regs->usbcmd);
1369 /* wait for reset to complete */
1370 timeout = jiffies + RESET_TIMEOUT;
1371 while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
1372 if (time_after(jiffies, timeout)) {
1373 dev_err(&dev->pdev->dev, "device reset timeout\n");
1374 return -ETIMEDOUT;
1376 cpu_relax();
1379 /* set controller to device mode */
1380 usbmode = readl(&dev->op_regs->usbmode);
1381 usbmode |= MODE_DEVICE;
1383 /* turn setup lockout off, require setup tripwire in usbcmd */
1384 usbmode |= MODE_SLOM;
1386 writel(usbmode, &dev->op_regs->usbmode);
1387 usbmode = readl(&dev->op_regs->usbmode);
1388 dev_vdbg(&dev->pdev->dev, "usbmode=0x%08x\n", usbmode);
1390 /* Write-Clear setup status */
1391 writel(0, &dev->op_regs->usbsts);
 1393 	/* if USB LPM is supported, ACK all LPM tokens */
1394 if (dev->lpm) {
1395 devlc = readl(&dev->op_regs->devlc);
1396 dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1397 /* FIXME: workaround for Langwell A1/A2/A3 sighting */
1398 devlc &= ~LPM_STL; /* don't STALL LPM token */
1399 devlc &= ~LPM_NYT_ACK; /* ACK LPM token */
1400 devlc_byte0 = devlc & 0xff;
1401 devlc_byte2 = (devlc >> 16) & 0xff;
1402 writeb(devlc_byte0, (u8 *)&dev->op_regs->devlc);
1403 writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
1404 devlc = readl(&dev->op_regs->devlc);
1405 dev_vdbg(&dev->pdev->dev,
1406 "ACK LPM token, devlc = 0x%08x\n", devlc);
1409 /* fill endpointlistaddr register */
1410 endpointlistaddr = dev->ep_dqh_dma;
1411 endpointlistaddr &= ENDPOINTLISTADDR_MASK;
1412 writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
1414 dev_vdbg(&dev->pdev->dev,
1415 "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
1416 dev->ep_dqh, endpointlistaddr,
1417 readl(&dev->op_regs->endpointlistaddr));
1418 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1419 return 0;
1423 /* reinitialize device controller endpoints */
1424 static int eps_reinit(struct langwell_udc *dev)
1426 struct langwell_ep *ep;
1427 char name[14];
1428 int i;
1430 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1432 /* initialize ep0 */
1433 ep = &dev->ep[0];
1434 ep->dev = dev;
1435 strncpy(ep->name, "ep0", sizeof(ep->name));
1436 ep->ep.name = ep->name;
1437 ep->ep.ops = &langwell_ep_ops;
1438 ep->stopped = 0;
1439 ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1440 ep->ep_num = 0;
1441 ep->desc = &langwell_ep0_desc;
1442 INIT_LIST_HEAD(&ep->queue);
1444 ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1446 /* initialize other endpoints */
1447 for (i = 2; i < dev->ep_max; i++) {
1448 ep = &dev->ep[i];
1449 if (i % 2)
1450 snprintf(name, sizeof(name), "ep%din", i / 2);
1451 else
1452 snprintf(name, sizeof(name), "ep%dout", i / 2);
1453 ep->dev = dev;
1454 strncpy(ep->name, name, sizeof(ep->name));
1455 ep->ep.name = ep->name;
1457 ep->ep.ops = &langwell_ep_ops;
1458 ep->stopped = 0;
1459 ep->ep.maxpacket = (unsigned short) ~0;
1460 ep->ep_num = i / 2;
1462 INIT_LIST_HEAD(&ep->queue);
1463 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
1466 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1467 return 0;
1471 /* enable interrupt and set controller to run state */
1472 static void langwell_udc_start(struct langwell_udc *dev)
1474 u32 usbintr, usbcmd;
1475 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1477 /* enable interrupts */
1478 usbintr = INTR_ULPIE /* ULPI */
1479 | INTR_SLE /* suspend */
1480 /* | INTR_SRE SOF received */
1481 | INTR_URE /* USB reset */
1482 | INTR_AAE /* async advance */
1483 | INTR_SEE /* system error */
1484 | INTR_FRE /* frame list rollover */
1485 | INTR_PCE /* port change detect */
1486 | INTR_UEE /* USB error interrupt */
1487 | INTR_UE; /* USB interrupt */
1488 writel(usbintr, &dev->op_regs->usbintr);
1490 /* clear stopped bit */
1491 dev->stopped = 0;
1493 /* set controller to run */
1494 usbcmd = readl(&dev->op_regs->usbcmd);
1495 usbcmd |= CMD_RUNSTOP;
1496 writel(usbcmd, &dev->op_regs->usbcmd);
1498 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1502 /* disable interrupt and set controller to stop state */
1503 static void langwell_udc_stop(struct langwell_udc *dev)
1505 u32 usbcmd;
1507 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1509 /* disable all interrupts */
1510 writel(0, &dev->op_regs->usbintr);
1512 /* set stopped bit */
1513 dev->stopped = 1;
1515 /* set controller to stop state */
1516 usbcmd = readl(&dev->op_regs->usbcmd);
1517 usbcmd &= ~CMD_RUNSTOP;
1518 writel(usbcmd, &dev->op_regs->usbcmd);
1520 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1524 /* stop all USB activities */
1525 static void stop_activity(struct langwell_udc *dev,
1526 struct usb_gadget_driver *driver)
1528 struct langwell_ep *ep;
1529 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1531 nuke(&dev->ep[0], -ESHUTDOWN);
1533 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1534 nuke(ep, -ESHUTDOWN);
1537 /* report disconnect; the driver is already quiesced */
1538 if (driver) {
1539 spin_unlock(&dev->lock);
1540 driver->disconnect(&dev->gadget);
1541 spin_lock(&dev->lock);
1544 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1548 /*-------------------------------------------------------------------------*/
1550 /* device "function" sysfs attribute file */
1551 static ssize_t show_function(struct device *_dev,
1552 struct device_attribute *attr, char *buf)
1554 struct langwell_udc *dev = dev_get_drvdata(_dev);
1556 if (!dev->driver || !dev->driver->function
1557 || strlen(dev->driver->function) > PAGE_SIZE)
1558 return 0;
1560 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1562 static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
1565 static inline enum usb_device_speed lpm_device_speed(u32 reg)
1567 switch (LPM_PSPD(reg)) {
1568 case LPM_SPEED_HIGH:
1569 return USB_SPEED_HIGH;
1570 case LPM_SPEED_FULL:
1571 return USB_SPEED_FULL;
1572 case LPM_SPEED_LOW:
1573 return USB_SPEED_LOW;
1574 default:
1575 return USB_SPEED_UNKNOWN;
1579 /* device "langwell_udc" sysfs attribute file */
1580 static ssize_t show_langwell_udc(struct device *_dev,
1581 struct device_attribute *attr, char *buf)
1583 struct langwell_udc *dev = dev_get_drvdata(_dev);
1584 struct langwell_request *req;
1585 struct langwell_ep *ep = NULL;
1586 char *next;
1587 unsigned size;
1588 unsigned t;
1589 unsigned i;
1590 unsigned long flags;
1591 u32 tmp_reg;
1593 next = buf;
1594 size = PAGE_SIZE;
1595 spin_lock_irqsave(&dev->lock, flags);
1597 /* driver basic information */
1598 t = scnprintf(next, size,
1599 DRIVER_DESC "\n"
1600 "%s version: %s\n"
1601 "Gadget driver: %s\n\n",
1602 driver_name, DRIVER_VERSION,
1603 dev->driver ? dev->driver->driver.name : "(none)");
1604 size -= t;
1605 next += t;
1607 /* device registers */
1608 tmp_reg = readl(&dev->op_regs->usbcmd);
1609 t = scnprintf(next, size,
1610 "USBCMD reg:\n"
1611 "SetupTW: %d\n"
1612 "Run/Stop: %s\n\n",
1613 (tmp_reg & CMD_SUTW) ? 1 : 0,
1614 (tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
1615 size -= t;
1616 next += t;
1618 tmp_reg = readl(&dev->op_regs->usbsts);
1619 t = scnprintf(next, size,
1620 "USB Status Reg:\n"
1621 "Device Suspend: %d\n"
1622 "Reset Received: %d\n"
1623 "System Error: %s\n"
1624 "USB Error Interrupt: %s\n\n",
1625 (tmp_reg & STS_SLI) ? 1 : 0,
1626 (tmp_reg & STS_URI) ? 1 : 0,
1627 (tmp_reg & STS_SEI) ? "Error" : "No error",
1628 (tmp_reg & STS_UEI) ? "Error detected" : "No error");
1629 size -= t;
1630 next += t;
1632 tmp_reg = readl(&dev->op_regs->usbintr);
1633 t = scnprintf(next, size,
1634 "USB Intrrupt Enable Reg:\n"
1635 "Sleep Enable: %d\n"
1636 "SOF Received Enable: %d\n"
1637 "Reset Enable: %d\n"
1638 "System Error Enable: %d\n"
1639 "Port Change Dectected Enable: %d\n"
1640 "USB Error Intr Enable: %d\n"
1641 "USB Intr Enable: %d\n\n",
1642 (tmp_reg & INTR_SLE) ? 1 : 0,
1643 (tmp_reg & INTR_SRE) ? 1 : 0,
1644 (tmp_reg & INTR_URE) ? 1 : 0,
1645 (tmp_reg & INTR_SEE) ? 1 : 0,
1646 (tmp_reg & INTR_PCE) ? 1 : 0,
1647 (tmp_reg & INTR_UEE) ? 1 : 0,
1648 (tmp_reg & INTR_UE) ? 1 : 0);
1649 size -= t;
1650 next += t;
1652 tmp_reg = readl(&dev->op_regs->frindex);
1653 t = scnprintf(next, size,
1654 "USB Frame Index Reg:\n"
1655 "Frame Number is 0x%08x\n\n",
1656 (tmp_reg & FRINDEX_MASK));
1657 size -= t;
1658 next += t;
1660 tmp_reg = readl(&dev->op_regs->deviceaddr);
1661 t = scnprintf(next, size,
1662 "USB Device Address Reg:\n"
1663 "Device Addr is 0x%x\n\n",
1664 USBADR(tmp_reg));
1665 size -= t;
1666 next += t;
1668 tmp_reg = readl(&dev->op_regs->endpointlistaddr);
1669 t = scnprintf(next, size,
1670 "USB Endpoint List Address Reg:\n"
1671 "Endpoint List Pointer is 0x%x\n\n",
1672 EPBASE(tmp_reg));
1673 size -= t;
1674 next += t;
1676 tmp_reg = readl(&dev->op_regs->portsc1);
1677 t = scnprintf(next, size,
1678 "USB Port Status & Control Reg:\n"
1679 "Port Reset: %s\n"
1680 "Port Suspend Mode: %s\n"
1681 "Over-current Change: %s\n"
1682 "Port Enable/Disable Change: %s\n"
1683 "Port Enabled/Disabled: %s\n"
1684 "Current Connect Status: %s\n"
1685 "LPM Suspend Status: %s\n\n",
1686 (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
1687 (tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
1688 (tmp_reg & PORTS_OCC) ? "Detected" : "No",
1689 (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
1690 (tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
1691 (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached",
1692 (tmp_reg & PORTS_SLP) ? "LPM L1" : "LPM L0");
1693 size -= t;
1694 next += t;
1696 tmp_reg = readl(&dev->op_regs->devlc);
1697 t = scnprintf(next, size,
1698 "Device LPM Control Reg:\n"
1699 "Parallel Transceiver : %d\n"
1700 "Serial Transceiver : %d\n"
1701 "Port Speed: %s\n"
1702 "Port Force Full Speed Connenct: %s\n"
1703 "PHY Low Power Suspend Clock: %s\n"
1704 "BmAttributes: %d\n\n",
1705 LPM_PTS(tmp_reg),
1706 (tmp_reg & LPM_STS) ? 1 : 0,
1707 usb_speed_string(lpm_device_speed(tmp_reg)),
1708 (tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
1709 (tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
1710 LPM_BA(tmp_reg));
1711 size -= t;
1712 next += t;
1714 tmp_reg = readl(&dev->op_regs->usbmode);
1715 t = scnprintf(next, size,
1716 "USB Mode Reg:\n"
1717 "Controller Mode is : %s\n\n", ({
1718 char *s;
1719 switch (MODE_CM(tmp_reg)) {
1720 case MODE_IDLE:
1721 s = "Idle"; break;
1722 case MODE_DEVICE:
1723 s = "Device Controller"; break;
1724 case MODE_HOST:
1725 s = "Host Controller"; break;
1726 default:
1727 s = "None"; break;
1730 }));
1731 size -= t;
1732 next += t;
1734 tmp_reg = readl(&dev->op_regs->endptsetupstat);
1735 t = scnprintf(next, size,
1736 "Endpoint Setup Status Reg:\n"
1737 "SETUP on ep 0x%04x\n\n",
1738 tmp_reg & SETUPSTAT_MASK);
1739 size -= t;
1740 next += t;
1742 for (i = 0; i < dev->ep_max / 2; i++) {
1743 tmp_reg = readl(&dev->op_regs->endptctrl[i]);
1744 t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
1745 i, tmp_reg);
1746 size -= t;
1747 next += t;
1749 tmp_reg = readl(&dev->op_regs->endptprime);
1750 t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
1751 size -= t;
1752 next += t;
1754 /* langwell_udc, langwell_ep, langwell_request structure information */
1755 ep = &dev->ep[0];
1756 t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
1757 ep->ep.name, ep->ep.maxpacket, ep->ep_num);
1758 size -= t;
1759 next += t;
1761 if (list_empty(&ep->queue)) {
1762 t = scnprintf(next, size, "its req queue is empty\n\n");
1763 size -= t;
1764 next += t;
1765 } else {
1766 list_for_each_entry(req, &ep->queue, queue) {
1767 t = scnprintf(next, size,
1768 "req %p actual 0x%x length 0x%x buf %p\n",
1769 &req->req, req->req.actual,
1770 req->req.length, req->req.buf);
1771 size -= t;
1772 next += t;
1775 /* other gadget->eplist ep */
1776 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1777 if (ep->desc) {
1778 t = scnprintf(next, size,
1779 "\n%s MaxPacketSize: 0x%x, "
1780 "ep_num: %d\n",
1781 ep->ep.name, ep->ep.maxpacket,
1782 ep->ep_num);
1783 size -= t;
1784 next += t;
1786 if (list_empty(&ep->queue)) {
1787 t = scnprintf(next, size,
1788 "its req queue is empty\n\n");
1789 size -= t;
1790 next += t;
1791 } else {
1792 list_for_each_entry(req, &ep->queue, queue) {
1793 t = scnprintf(next, size,
1794 "req %p actual 0x%x length "
1795 "0x%x buf %p\n",
1796 &req->req, req->req.actual,
1797 req->req.length, req->req.buf);
1798 size -= t;
1799 next += t;
1805 spin_unlock_irqrestore(&dev->lock, flags);
1806 return PAGE_SIZE - size;
1808 static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
1811 /* device "remote_wakeup" sysfs attribute file */
1812 static ssize_t store_remote_wakeup(struct device *_dev,
1813 struct device_attribute *attr, const char *buf, size_t count)
1815 struct langwell_udc *dev = dev_get_drvdata(_dev);
1816 unsigned long flags;
1817 ssize_t rc = count;
1819 if (count > 2)
1820 return -EINVAL;
1822 if (count > 0 && buf[count-1] == '\n')
1823 ((char *) buf)[count-1] = 0;
1825 if (buf[0] != '1')
1826 return -EINVAL;
 1828 	/* force remote wakeup enabled in case the gadget driver doesn't support it */
1829 spin_lock_irqsave(&dev->lock, flags);
1830 dev->remote_wakeup = 1;
1831 dev->dev_status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
1832 spin_unlock_irqrestore(&dev->lock, flags);
1834 langwell_wakeup(&dev->gadget);
1836 return rc;
1838 static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
1841 /*-------------------------------------------------------------------------*/
1844 * when a driver is successfully registered, it will receive
1845 * control requests including set_configuration(), which enables
1846 * non-control requests. then usb traffic follows until a
1847 * disconnect is reported. then a host may connect again, or
1848 * the driver might get unbound.
1851 static int langwell_start(struct usb_gadget *g,
1852 struct usb_gadget_driver *driver)
1854 struct langwell_udc *dev = gadget_to_langwell(g);
1855 unsigned long flags;
1856 int retval;
1858 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1860 spin_lock_irqsave(&dev->lock, flags);
1862 /* hook up the driver ... */
1863 driver->driver.bus = NULL;
1864 dev->driver = driver;
1865 dev->gadget.dev.driver = &driver->driver;
1867 spin_unlock_irqrestore(&dev->lock, flags);
1869 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
1870 if (retval)
1871 goto err;
1873 dev->usb_state = USB_STATE_ATTACHED;
1874 dev->ep0_state = WAIT_FOR_SETUP;
1875 dev->ep0_dir = USB_DIR_OUT;
1877 /* enable interrupt and set controller to run state */
1878 if (dev->got_irq)
1879 langwell_udc_start(dev);
1881 dev_vdbg(&dev->pdev->dev,
1882 "After langwell_udc_start(), print all registers:\n");
1883 print_all_registers(dev);
1885 dev_info(&dev->pdev->dev, "register driver: %s\n",
1886 driver->driver.name);
1887 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1889 return 0;
1891 err:
1892 dev->gadget.dev.driver = NULL;
1893 dev->driver = NULL;
1895 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1897 return retval;
1900 /* unregister gadget driver */
1901 static int langwell_stop(struct usb_gadget *g,
1902 struct usb_gadget_driver *driver)
1904 struct langwell_udc *dev = gadget_to_langwell(g);
1905 unsigned long flags;
1907 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1909 /* exit PHY low power suspend */
1910 if (dev->pdev->device != 0x0829)
1911 langwell_phy_low_power(dev, 0);
1913 /* unbind OTG transceiver */
1914 if (dev->transceiver)
1915 (void)otg_set_peripheral(dev->transceiver, 0);
1917 /* disable interrupt and set controller to stop state */
1918 langwell_udc_stop(dev);
1920 dev->usb_state = USB_STATE_ATTACHED;
1921 dev->ep0_state = WAIT_FOR_SETUP;
1922 dev->ep0_dir = USB_DIR_OUT;
1924 spin_lock_irqsave(&dev->lock, flags);
1926 /* stop all usb activities */
1927 dev->gadget.speed = USB_SPEED_UNKNOWN;
1928 stop_activity(dev, driver);
1929 spin_unlock_irqrestore(&dev->lock, flags);
1931 dev->gadget.dev.driver = NULL;
1932 dev->driver = NULL;
1934 device_remove_file(&dev->pdev->dev, &dev_attr_function);
1936 dev_info(&dev->pdev->dev, "unregistered driver '%s'\n",
1937 driver->driver.name);
1938 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1940 return 0;
1943 /*-------------------------------------------------------------------------*/
1946 * setup tripwire is used as a semaphore to ensure that the setup data
1947 * payload is extracted from a dQH without being corrupted
1949 static void setup_tripwire(struct langwell_udc *dev)
1951 u32 usbcmd,
1952 endptsetupstat;
1953 unsigned long timeout;
1954 struct langwell_dqh *dqh;
1956 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1958 /* ep0 OUT dQH */
1959 dqh = &dev->ep_dqh[EP_DIR_OUT];
1961 /* Write-Clear endptsetupstat */
1962 endptsetupstat = readl(&dev->op_regs->endptsetupstat);
1963 writel(endptsetupstat, &dev->op_regs->endptsetupstat);
1965 /* wait until endptsetupstat is cleared */
1966 timeout = jiffies + SETUPSTAT_TIMEOUT;
1967 while (readl(&dev->op_regs->endptsetupstat)) {
1968 if (time_after(jiffies, timeout)) {
1969 dev_err(&dev->pdev->dev, "setup_tripwire timeout\n");
1970 break;
1972 cpu_relax();
 1975 	/* repeat while the tripwire gets cleared, i.e. a new setup packet arrived mid-copy */
1976 do {
1977 /* set setup tripwire bit */
1978 usbcmd = readl(&dev->op_regs->usbcmd);
1979 writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
1981 /* copy the setup packet to local buffer */
1982 memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
1983 } while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
1985 /* Write-Clear setup tripwire bit */
1986 usbcmd = readl(&dev->op_regs->usbcmd);
1987 writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
1989 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1993 /* protocol ep0 stall, will automatically be cleared on new transaction */
1994 static void ep0_stall(struct langwell_udc *dev)
1996 u32 endptctrl;
1998 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2000 /* set TX and RX to stall */
2001 endptctrl = readl(&dev->op_regs->endptctrl[0]);
2002 endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
2003 writel(endptctrl, &dev->op_regs->endptctrl[0]);
2005 /* update ep0 state */
2006 dev->ep0_state = WAIT_FOR_SETUP;
2007 dev->ep0_dir = USB_DIR_OUT;
2009 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2013 /* PRIME a status phase for ep0 */
2014 static int prime_status_phase(struct langwell_udc *dev, int dir)
2016 struct langwell_request *req;
2017 struct langwell_ep *ep;
2018 int status = 0;
2020 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2022 if (dir == EP_DIR_IN)
2023 dev->ep0_dir = USB_DIR_IN;
2024 else
2025 dev->ep0_dir = USB_DIR_OUT;
2027 ep = &dev->ep[0];
2028 dev->ep0_state = WAIT_FOR_OUT_STATUS;
2030 req = dev->status_req;
2032 req->ep = ep;
2033 req->req.length = 0;
2034 req->req.status = -EINPROGRESS;
2035 req->req.actual = 0;
2036 req->req.complete = NULL;
2037 req->dtd_count = 0;
2039 if (!req_to_dtd(req))
2040 status = queue_dtd(ep, req);
2041 else
2042 return -ENOMEM;
2044 if (status)
2045 dev_err(&dev->pdev->dev, "can't queue ep0 status request\n");
2047 list_add_tail(&req->queue, &ep->queue);
2049 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2050 return status;
2054 /* SET_ADDRESS request routine */
2055 static void set_address(struct langwell_udc *dev, u16 value,
2056 u16 index, u16 length)
2058 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2060 /* save the new address to device struct */
2061 dev->dev_addr = (u8) value;
2062 dev_vdbg(&dev->pdev->dev, "dev->dev_addr = %d\n", dev->dev_addr);
2064 /* update usb state */
2065 dev->usb_state = USB_STATE_ADDRESS;
2067 /* STATUS phase */
2068 if (prime_status_phase(dev, EP_DIR_IN))
2069 ep0_stall(dev);
2071 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2075 /* return endpoint by windex */
2076 static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
2077 u16 wIndex)
2079 struct langwell_ep *ep;
2080 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2082 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2083 return &dev->ep[0];
2085 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2086 u8 bEndpointAddress;
2087 if (!ep->desc)
2088 continue;
2090 bEndpointAddress = ep->desc->bEndpointAddress;
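/* the XOR isolates the direction bit: skip endpoints whose direction differs from the one encoded in wIndex */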
2091 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2092 continue;
2094 if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
2095 == (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
2096 return ep;
2099 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2100 return NULL;
2104 /* return whether endpoint is stalled, 0: not stalled; 1: stalled */
2105 static int ep_is_stall(struct langwell_ep *ep)
2107 struct langwell_udc *dev = ep->dev;
2108 u32 endptctrl;
2109 int retval;
2111 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2113 endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
2114 if (is_in(ep))
2115 retval = endptctrl & EPCTRL_TXS ? 1 : 0;
2116 else
2117 retval = endptctrl & EPCTRL_RXS ? 1 : 0;
2119 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2120 return retval;
2124 /* GET_STATUS request routine */
2125 static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
2126 u16 index, u16 length)
2128 struct langwell_request *req;
2129 struct langwell_ep *ep;
2130 u16 status_data = 0; /* 16-bit status data, CPU byte order */
2131 int status = 0;
2133 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2135 ep = &dev->ep[0];
2137 if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
2138 /* get device status */
2139 status_data = dev->dev_status;
2140 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
2141 /* get interface status */
2142 status_data = 0;
2143 } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
2144 /* get endpoint status */
2145 struct langwell_ep *epn;
2146 epn = get_ep_by_windex(dev, index);
2147 /* stall if endpoint doesn't exist */
2148 if (!epn)
2149 goto stall;
2151 status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
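/* USB_ENDPOINT_HALT is feature selector 0, so the halt flag lands in bit 0 of the returned wStatus */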
2154 dev_dbg(&dev->pdev->dev, "get status data: 0x%04x\n", status_data);
2156 dev->ep0_dir = USB_DIR_IN;
2158 /* borrow the per device status_req */
2159 req = dev->status_req;
2161 /* fill in the request structure */
2162 *((u16 *) req->req.buf) = cpu_to_le16(status_data);
2163 req->ep = ep;
2164 req->req.length = 2;
2165 req->req.status = -EINPROGRESS;
2166 req->req.actual = 0;
2167 req->req.complete = NULL;
2168 req->dtd_count = 0;
2170 /* prime the data phase */
2171 if (!req_to_dtd(req))
2172 status = queue_dtd(ep, req);
2173 else /* no mem */
2174 goto stall;
2176 if (status) {
2177 dev_err(&dev->pdev->dev,
2178 "response error on GET_STATUS request\n");
2179 goto stall;
2182 list_add_tail(&req->queue, &ep->queue);
2183 dev->ep0_state = DATA_STATE_XMIT;
2185 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2186 return;
2187 stall:
2188 ep0_stall(dev);
2189 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2193 /* setup packet interrupt handler */
2194 static void handle_setup_packet(struct langwell_udc *dev,
2195 struct usb_ctrlrequest *setup)
2197 u16 wValue = le16_to_cpu(setup->wValue);
2198 u16 wIndex = le16_to_cpu(setup->wIndex);
2199 u16 wLength = le16_to_cpu(setup->wLength);
2200 u32 portsc1;
2202 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2204 /* ep0 fifo flush */
2205 nuke(&dev->ep[0], -ESHUTDOWN);
2207 dev_dbg(&dev->pdev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
2208 setup->bRequestType, setup->bRequest,
2209 wValue, wIndex, wLength);
2211 /* RNDIS gadget delegate */
2212 if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
2213 /* USB_CDC_SEND_ENCAPSULATED_COMMAND */
2214 goto delegate;
2217 /* USB_CDC_GET_ENCAPSULATED_RESPONSE */
2218 if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
2219 /* USB_CDC_GET_ENCAPSULATED_RESPONSE */
2220 goto delegate;
2223 /* We process some standard setup requests here */
2224 switch (setup->bRequest) {
2225 case USB_REQ_GET_STATUS:
2226 dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_GET_STATUS\n");
2227 /* get status, DATA and STATUS phase */
2228 if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2229 != (USB_DIR_IN | USB_TYPE_STANDARD))
2230 break;
2231 get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
2232 goto end;
2234 case USB_REQ_SET_ADDRESS:
2235 dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_SET_ADDRESS\n");
2236 /* STATUS phase */
2237 if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
2238 | USB_RECIP_DEVICE))
2239 break;
2240 set_address(dev, wValue, wIndex, wLength);
2241 goto end;
2243 case USB_REQ_CLEAR_FEATURE:
2244 case USB_REQ_SET_FEATURE:
2245 /* STATUS phase */
2247 int rc = -EOPNOTSUPP;
2248 if (setup->bRequest == USB_REQ_SET_FEATURE)
2249 dev_dbg(&dev->pdev->dev,
2250 "SETUP: USB_REQ_SET_FEATURE\n");
2251 else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
2252 dev_dbg(&dev->pdev->dev,
2253 "SETUP: USB_REQ_CLEAR_FEATURE\n");
2255 if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
2256 == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
2257 struct langwell_ep *epn;
2258 epn = get_ep_by_windex(dev, wIndex);
2259 /* stall if endpoint doesn't exist */
2260 if (!epn) {
2261 ep0_stall(dev);
2262 goto end;
2265 if (wValue != 0 || wLength != 0
2266 || epn->ep_num > dev->ep_max)
2267 break;
2269 spin_unlock(&dev->lock);
2270 rc = langwell_ep_set_halt(&epn->ep,
2271 (setup->bRequest == USB_REQ_SET_FEATURE)
2272 ? 1 : 0);
2273 spin_lock(&dev->lock);
2275 } else if ((setup->bRequestType & (USB_RECIP_MASK
2276 | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
2277 | USB_TYPE_STANDARD)) {
2278 rc = 0;
2279 switch (wValue) {
2280 case USB_DEVICE_REMOTE_WAKEUP:
2281 if (setup->bRequest == USB_REQ_SET_FEATURE) {
2282 dev->remote_wakeup = 1;
2283 dev->dev_status |= (1 << wValue);
2284 } else {
2285 dev->remote_wakeup = 0;
2286 dev->dev_status &= ~(1 << wValue);
2288 break;
2289 case USB_DEVICE_TEST_MODE:
2290 dev_dbg(&dev->pdev->dev, "SETUP: TEST MODE\n");
2291 if ((wIndex & 0xff) ||
2292 (dev->gadget.speed != USB_SPEED_HIGH))
2293 ep0_stall(dev);
2295 switch (wIndex >> 8) {
2296 case TEST_J:
2297 case TEST_K:
2298 case TEST_SE0_NAK:
2299 case TEST_PACKET:
2300 case TEST_FORCE_EN:
2301 if (prime_status_phase(dev, EP_DIR_IN))
2302 ep0_stall(dev);
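/* the high byte of wIndex carries the test selector; the shift below moves it into the port test control field of portsc1 */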
2303 portsc1 = readl(&dev->op_regs->portsc1);
2304 portsc1 |= (wIndex & 0xf00) << 8;
2305 writel(portsc1, &dev->op_regs->portsc1);
2306 goto end;
2307 default:
2308 rc = -EOPNOTSUPP;
2310 break;
2311 default:
2312 rc = -EOPNOTSUPP;
2313 break;
2316 if (!gadget_is_otg(&dev->gadget))
2317 break;
2318 else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
2319 dev->gadget.b_hnp_enable = 1;
2320 #ifdef OTG_TRANSCEIVER
2321 if (!dev->lotg->otg.default_a)
2322 dev->lotg->hsm.b_hnp_enable = 1;
2323 #endif
2324 } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
2325 dev->gadget.a_hnp_support = 1;
2326 else if (setup->bRequest ==
2327 USB_DEVICE_A_ALT_HNP_SUPPORT)
2328 dev->gadget.a_alt_hnp_support = 1;
2329 else
2330 break;
2331 } else
2332 break;
2334 if (rc == 0) {
2335 if (prime_status_phase(dev, EP_DIR_IN))
2336 ep0_stall(dev);
2338 goto end;
2341 case USB_REQ_GET_DESCRIPTOR:
2342 dev_dbg(&dev->pdev->dev,
2343 "SETUP: USB_REQ_GET_DESCRIPTOR\n");
2344 goto delegate;
2346 case USB_REQ_SET_DESCRIPTOR:
2347 dev_dbg(&dev->pdev->dev,
2348 "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
2349 goto delegate;
2351 case USB_REQ_GET_CONFIGURATION:
2352 dev_dbg(&dev->pdev->dev,
2353 "SETUP: USB_REQ_GET_CONFIGURATION\n");
2354 goto delegate;
2356 case USB_REQ_SET_CONFIGURATION:
2357 dev_dbg(&dev->pdev->dev,
2358 "SETUP: USB_REQ_SET_CONFIGURATION\n");
2359 goto delegate;
2361 case USB_REQ_GET_INTERFACE:
2362 dev_dbg(&dev->pdev->dev,
2363 "SETUP: USB_REQ_GET_INTERFACE\n");
2364 goto delegate;
2366 case USB_REQ_SET_INTERFACE:
2367 dev_dbg(&dev->pdev->dev,
2368 "SETUP: USB_REQ_SET_INTERFACE\n");
2369 goto delegate;
2371 case USB_REQ_SYNCH_FRAME:
2372 dev_dbg(&dev->pdev->dev,
2373 "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
2374 goto delegate;
2376 default:
2377 /* delegate USB standard requests to the gadget driver */
2378 goto delegate;
2379 delegate:
2380 /* USB requests handled by gadget */
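/* drop the device lock around the gadget driver's setup() callback, since it may re-enter the UDC (e.g. to queue an ep0 response) */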
2381 if (wLength) {
2382 /* DATA phase from gadget, STATUS phase from udc */
2383 dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
2384 ? USB_DIR_IN : USB_DIR_OUT;
2385 dev_vdbg(&dev->pdev->dev,
2386 "dev->ep0_dir = 0x%x, wLength = %d\n",
2387 dev->ep0_dir, wLength);
2388 spin_unlock(&dev->lock);
2389 if (dev->driver->setup(&dev->gadget,
2390 &dev->local_setup_buff) < 0)
2391 ep0_stall(dev);
2392 spin_lock(&dev->lock);
2393 dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
2394 ? DATA_STATE_XMIT : DATA_STATE_RECV;
2395 } else {
2396 /* no DATA phase, IN STATUS phase from gadget */
2397 dev->ep0_dir = USB_DIR_IN;
2398 dev_vdbg(&dev->pdev->dev,
2399 "dev->ep0_dir = 0x%x, wLength = %d\n",
2400 dev->ep0_dir, wLength);
2401 spin_unlock(&dev->lock);
2402 if (dev->driver->setup(&dev->gadget,
2403 &dev->local_setup_buff) < 0)
2404 ep0_stall(dev);
2405 spin_lock(&dev->lock);
2406 dev->ep0_state = WAIT_FOR_OUT_STATUS;
2408 break;
2410 end:
2411 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2415 /* transfer completion, process endpoint request and free the completed dTDs
2416  * for this request
2417  */
2418 static int process_ep_req(struct langwell_udc *dev, int index,
2419 struct langwell_request *curr_req)
2421 struct langwell_dtd *curr_dtd;
2422 struct langwell_dqh *curr_dqh;
2423 int td_complete, actual, remaining_length;
2424 int i, dir;
2425 u8 dtd_status = 0;
2426 int retval = 0;
2428 curr_dqh = &dev->ep_dqh[index];
2429 dir = index % 2;
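/* dQHs come in OUT/IN pairs: even indices address the OUT queue head, odd indices the IN queue head of the same endpoint */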
2431 curr_dtd = curr_req->head;
2432 td_complete = 0;
2433 actual = curr_req->req.length;
2435 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2437 for (i = 0; i < curr_req->dtd_count; i++) {
2439 /* command execution states by dTD */
2440 dtd_status = curr_dtd->dtd_status;
2442 barrier();
2443 remaining_length = le16_to_cpu(curr_dtd->dtd_total);
2444 actual -= remaining_length;
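/* dtd_total holds the bytes the controller did not transfer, so subtracting the leftovers yields the bytes actually moved */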
2446 if (!dtd_status) {
2447 /* transfers completed successfully */
2448 if (!remaining_length) {
2449 td_complete++;
2450 dev_vdbg(&dev->pdev->dev,
2451 "dTD transmitted successfully\n");
2452 } else {
2453 if (dir) {
2454 dev_vdbg(&dev->pdev->dev,
2455 "TX dTD remains data\n");
2456 retval = -EPROTO;
2457 break;
2459 } else {
2460 td_complete++;
2461 break;
2464 } else {
2465 /* transfers completed with errors */
2466 if (dtd_status & DTD_STS_ACTIVE) {
2467 dev_dbg(&dev->pdev->dev,
2468 "dTD status ACTIVE dQH[%d]\n", index);
2469 retval = 1;
2470 return retval;
2471 } else if (dtd_status & DTD_STS_HALTED) {
2472 dev_err(&dev->pdev->dev,
2473 "dTD error %08x dQH[%d]\n",
2474 dtd_status, index);
2475 /* clear the errors and halt condition */
2476 curr_dqh->dtd_status = 0;
2477 retval = -EPIPE;
2478 break;
2479 } else if (dtd_status & DTD_STS_DBE) {
2480 dev_dbg(&dev->pdev->dev,
2481 "data buffer (overflow) error\n");
2482 retval = -EPROTO;
2483 break;
2484 } else if (dtd_status & DTD_STS_TRE) {
2485 dev_dbg(&dev->pdev->dev,
2486 "transaction(ISO) error\n");
2487 retval = -EILSEQ;
2488 break;
2489 } else
2490 dev_err(&dev->pdev->dev,
2491 "unknown error (0x%x)!\n",
2492 dtd_status);
2495 if (i != curr_req->dtd_count - 1)
2496 curr_dtd = (struct langwell_dtd *)
2497 curr_dtd->next_dtd_virt;
2500 if (retval)
2501 return retval;
2503 curr_req->req.actual = actual;
2505 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2506 return 0;
2510 /* complete DATA or STATUS phase of ep0, prime status phase if needed */
2511 static void ep0_req_complete(struct langwell_udc *dev,
2512 struct langwell_ep *ep0, struct langwell_request *req)
2514 u32 new_addr;
2515 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2517 if (dev->usb_state == USB_STATE_ADDRESS) {
2518 /* set the new address */
2519 new_addr = (u32)dev->dev_addr;
2520 writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
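/* the new address may only take effect after the SET_ADDRESS status stage completes, so the deviceaddr write is deferred to this completion path instead of set_address() */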
2522 new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
2523 dev_vdbg(&dev->pdev->dev, "new_addr = %d\n", new_addr);
2526 done(ep0, req, 0);
2528 switch (dev->ep0_state) {
2529 case DATA_STATE_XMIT:
2530 /* receive status phase */
2531 if (prime_status_phase(dev, EP_DIR_OUT))
2532 ep0_stall(dev);
2533 break;
2534 case DATA_STATE_RECV:
2535 /* send status phase */
2536 if (prime_status_phase(dev, EP_DIR_IN))
2537 ep0_stall(dev);
2538 break;
2539 case WAIT_FOR_OUT_STATUS:
2540 dev->ep0_state = WAIT_FOR_SETUP;
2541 break;
2542 case WAIT_FOR_SETUP:
2543 dev_err(&dev->pdev->dev, "unexpect ep0 packets\n");
2544 break;
2545 default:
2546 ep0_stall(dev);
2547 break;
2550 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2554 /* USB transfer completion interrupt */
2555 static void handle_trans_complete(struct langwell_udc *dev)
2557 u32 complete_bits;
2558 int i, ep_num, dir, bit_mask, status;
2559 struct langwell_ep *epn;
2560 struct langwell_request *curr_req, *temp_req;
2562 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2564 complete_bits = readl(&dev->op_regs->endptcomplete);
2565 dev_vdbg(&dev->pdev->dev, "endptcomplete register: 0x%08x\n",
2566 complete_bits);
2568 /* Write-Clear the bits in endptcomplete register */
2569 writel(complete_bits, &dev->op_regs->endptcomplete);
2571 if (!complete_bits) {
2572 dev_dbg(&dev->pdev->dev, "complete_bits = 0\n");
2573 goto done;
2576 for (i = 0; i < dev->ep_max; i++) {
2577 ep_num = i / 2;
2578 dir = i % 2;
2580 bit_mask = 1 << (ep_num + 16 * dir);
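/* endptcomplete flags OUT endpoints in its low 16 bits and IN endpoints in its high 16 bits, hence the 16 * dir offset */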
2582 if (!(complete_bits & bit_mask))
2583 continue;
2585 /* ep0 */
2586 if (i == 1)
2587 epn = &dev->ep[0];
2588 else
2589 epn = &dev->ep[i];
2591 if (epn->name == NULL) {
2592 dev_warn(&dev->pdev->dev, "invalid endpoint\n");
2593 continue;
2596 if (i < 2)
2597 /* ep0 in and out */
2598 dev_dbg(&dev->pdev->dev, "%s-%s transfer completed\n",
2599 epn->name,
2600 is_in(epn) ? "in" : "out");
2601 else
2602 dev_dbg(&dev->pdev->dev, "%s transfer completed\n",
2603 epn->name);
2605 /* process the request queue until an incomplete request is found */
2606 list_for_each_entry_safe(curr_req, temp_req,
2607 &epn->queue, queue) {
2608 status = process_ep_req(dev, i, curr_req);
2609 dev_vdbg(&dev->pdev->dev, "%s req status: %d\n",
2610 epn->name, status);
2612 if (status)
2613 break;
2615 /* write back status to req */
2616 curr_req->req.status = status;
2618 /* ep0 request completion */
2619 if (ep_num == 0) {
2620 ep0_req_complete(dev, epn, curr_req);
2621 break;
2622 } else {
2623 done(epn, curr_req, status);
2627 done:
2628 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2631 /* port change detect interrupt handler */
2632 static void handle_port_change(struct langwell_udc *dev)
2634 u32 portsc1, devlc;
2636 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2638 if (dev->bus_reset)
2639 dev->bus_reset = 0;
2641 portsc1 = readl(&dev->op_regs->portsc1);
2642 devlc = readl(&dev->op_regs->devlc);
2643 dev_vdbg(&dev->pdev->dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
2644 portsc1, devlc);
2646 /* bus reset is finished */
2647 if (!(portsc1 & PORTS_PR)) {
2648 /* get the speed */
2649 dev->gadget.speed = lpm_device_speed(devlc);
2650 dev_vdbg(&dev->pdev->dev, "dev->gadget.speed = %d\n",
2651 dev->gadget.speed);
2654 /* LPM L0 to L1 */
2655 if (dev->lpm && dev->lpm_state == LPM_L0)
2656 if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
2657 dev_info(&dev->pdev->dev, "LPM L0 to L1\n");
2658 dev->lpm_state = LPM_L1;
2661 /* LPM L1 to L0, force resume or remote wakeup finished */
2662 if (dev->lpm && dev->lpm_state == LPM_L1)
2663 if (!(portsc1 & PORTS_SUSP)) {
2664 dev_info(&dev->pdev->dev, "LPM L1 to L0\n");
2665 dev->lpm_state = LPM_L0;
2668 /* update USB state */
2669 if (!dev->resume_state)
2670 dev->usb_state = USB_STATE_DEFAULT;
2672 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2676 /* USB reset interrupt handler */
2677 static void handle_usb_reset(struct langwell_udc *dev)
2679 u32 deviceaddr,
2680 endptsetupstat,
2681 endptcomplete;
2682 unsigned long timeout;
2684 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2686 /* Write-Clear the device address */
2687 deviceaddr = readl(&dev->op_regs->deviceaddr);
2688 writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);
2690 dev->dev_addr = 0;
2692 /* clear usb state */
2693 dev->resume_state = 0;
2695 /* LPM L1 to L0, reset */
2696 if (dev->lpm)
2697 dev->lpm_state = LPM_L0;
2699 dev->ep0_dir = USB_DIR_OUT;
2700 dev->ep0_state = WAIT_FOR_SETUP;
2702 /* remote wakeup reset to 0 when the device is reset */
2703 dev->remote_wakeup = 0;
2704 dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
2705 dev->gadget.b_hnp_enable = 0;
2706 dev->gadget.a_hnp_support = 0;
2707 dev->gadget.a_alt_hnp_support = 0;
2709 /* Write-Clear all the setup token semaphores */
2710 endptsetupstat = readl(&dev->op_regs->endptsetupstat);
2711 writel(endptsetupstat, &dev->op_regs->endptsetupstat);
2713 /* Write-Clear all the endpoint complete status bits */
2714 endptcomplete = readl(&dev->op_regs->endptcomplete);
2715 writel(endptcomplete, &dev->op_regs->endptcomplete);
2717 /* wait until all endptprime bits cleared */
2718 timeout = jiffies + PRIME_TIMEOUT;
2719 while (readl(&dev->op_regs->endptprime)) {
2720 if (time_after(jiffies, timeout)) {
2721 dev_err(&dev->pdev->dev, "USB reset timeout\n");
2722 break;
2724 cpu_relax();
2727 /* write 1s to endptflush register to clear any primed buffers */
2728 writel((u32) ~0, &dev->op_regs->endptflush);
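/* flushing every endpoint cancels anything already primed, so stale dTDs are not executed after the reset */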
2730 if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
2731 dev_vdbg(&dev->pdev->dev, "USB bus reset\n");
2732 /* bus is resetting */
2733 dev->bus_reset = 1;
2735 /* reset all the queues, stop all USB activities */
2736 stop_activity(dev, dev->driver);
2737 dev->usb_state = USB_STATE_DEFAULT;
2738 } else {
2739 dev_vdbg(&dev->pdev->dev, "device controller reset\n");
2740 /* controller reset */
2741 langwell_udc_reset(dev);
2743 /* reset all the queues, stop all USB activities */
2744 stop_activity(dev, dev->driver);
2746 /* reset ep0 dQH and endptctrl */
2747 ep0_reset(dev);
2749 /* enable interrupt and set controller to run state */
2750 langwell_udc_start(dev);
2752 dev->usb_state = USB_STATE_ATTACHED;
2755 #ifdef OTG_TRANSCEIVER
2756 /* per USB OTG spec 6.6.2.3, b_hnp_enable is cleared */
2757 if (!dev->lotg->otg.default_a)
2758 dev->lotg->hsm.b_hnp_enable = 0;
2759 #endif
2761 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2765 /* USB bus suspend/resume interrupt */
2766 static void handle_bus_suspend(struct langwell_udc *dev)
2768 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2770 dev->resume_state = dev->usb_state;
2771 dev->usb_state = USB_STATE_SUSPENDED;
2773 #ifdef OTG_TRANSCEIVER
2774 if (dev->lotg->otg.default_a) {
2775 if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
2776 dev->lotg->hsm.b_bus_suspend = 1;
2777 /* notify transceiver the state changes */
2778 if (spin_trylock(&dev->lotg->wq_lock)) {
2779 langwell_update_transceiver();
2780 spin_unlock(&dev->lotg->wq_lock);
2783 dev->lotg->hsm.b_bus_suspend_vld++;
2784 } else {
2785 if (!dev->lotg->hsm.a_bus_suspend) {
2786 dev->lotg->hsm.a_bus_suspend = 1;
2787 /* notify transceiver the state changes */
2788 if (spin_trylock(&dev->lotg->wq_lock)) {
2789 langwell_update_transceiver();
2790 spin_unlock(&dev->lotg->wq_lock);
2794 #endif
2796 /* report suspend to the driver */
2797 if (dev->driver) {
2798 if (dev->driver->suspend) {
2799 spin_unlock(&dev->lock);
2800 dev->driver->suspend(&dev->gadget);
2801 spin_lock(&dev->lock);
2802 dev_dbg(&dev->pdev->dev, "suspend %s\n",
2803 dev->driver->driver.name);
2807 /* enter PHY low power suspend */
2808 if (dev->pdev->device != 0x0829)
2809 langwell_phy_low_power(dev, 1);
2811 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2815 static void handle_bus_resume(struct langwell_udc *dev)
2817 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2819 dev->usb_state = dev->resume_state;
2820 dev->resume_state = 0;
2822 /* exit PHY low power suspend */
2823 if (dev->pdev->device != 0x0829)
2824 langwell_phy_low_power(dev, 0);
2826 #ifdef OTG_TRANSCEIVER
2827 if (dev->lotg->otg.default_a == 0)
2828 dev->lotg->hsm.a_bus_suspend = 0;
2829 #endif
2831 /* report resume to the driver */
2832 if (dev->driver) {
2833 if (dev->driver->resume) {
2834 spin_unlock(&dev->lock);
2835 dev->driver->resume(&dev->gadget);
2836 spin_lock(&dev->lock);
2837 dev_dbg(&dev->pdev->dev, "resume %s\n",
2838 dev->driver->driver.name);
2842 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2846 /* USB device controller interrupt handler */
2847 static irqreturn_t langwell_irq(int irq, void *_dev)
2849 struct langwell_udc *dev = _dev;
2850 u32 usbsts,
2851 usbintr,
2852 irq_sts,
2853 portsc1;
2855 dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2857 if (dev->stopped) {
2858 dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2859 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2860 return IRQ_NONE;
2863 spin_lock(&dev->lock);
2865 /* USB status */
2866 usbsts = readl(&dev->op_regs->usbsts);
2868 /* USB interrupt enable */
2869 usbintr = readl(&dev->op_regs->usbintr);
2871 irq_sts = usbsts & usbintr;
2872 dev_vdbg(&dev->pdev->dev,
2873 "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
2874 usbsts, usbintr, irq_sts);
2876 if (!irq_sts) {
2877 dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2878 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2879 spin_unlock(&dev->lock);
2880 return IRQ_NONE;
2883 /* Write-Clear interrupt status bits */
2884 writel(irq_sts, &dev->op_regs->usbsts);
2886 /* resume from suspend */
2887 portsc1 = readl(&dev->op_regs->portsc1);
2888 if (dev->usb_state == USB_STATE_SUSPENDED)
2889 if (!(portsc1 & PORTS_SUSP))
2890 handle_bus_resume(dev);
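/* resume is detected by sampling portsc1 on each interrupt: if the device was suspended and the port has left suspend, report a bus resume */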
2892 /* USB interrupt */
2893 if (irq_sts & STS_UI) {
2894 dev_vdbg(&dev->pdev->dev, "USB interrupt\n");
2896 /* setup packet received from ep0 */
2897 if (readl(&dev->op_regs->endptsetupstat)
2898 & EP0SETUPSTAT_MASK) {
2899 dev_vdbg(&dev->pdev->dev,
2900 "USB SETUP packet received interrupt\n");
2901 /* setup tripwire semaphore */
2902 setup_tripwire(dev);
2903 handle_setup_packet(dev, &dev->local_setup_buff);
2906 /* USB transfer completion */
2907 if (readl(&dev->op_regs->endptcomplete)) {
2908 dev_vdbg(&dev->pdev->dev,
2909 "USB transfer completion interrupt\n");
2910 handle_trans_complete(dev);
2914 /* SOF received interrupt (for ISO transfer) */
2915 if (irq_sts & STS_SRI) {
2916 /* FIXME */
2917 /* dev_vdbg(&dev->pdev->dev, "SOF received interrupt\n"); */
2920 /* port change detect interrupt */
2921 if (irq_sts & STS_PCI) {
2922 dev_vdbg(&dev->pdev->dev, "port change detect interrupt\n");
2923 handle_port_change(dev);
2926 /* suspend interrupt */
2927 if (irq_sts & STS_SLI) {
2928 dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");
2929 handle_bus_suspend(dev);
2932 /* USB reset interrupt */
2933 if (irq_sts & STS_URI) {
2934 dev_vdbg(&dev->pdev->dev, "USB reset interrupt\n");
2935 handle_usb_reset(dev);
2938 /* USB error or system error interrupt */
2939 if (irq_sts & (STS_UEI | STS_SEI)) {
2940 /* FIXME */
2941 dev_warn(&dev->pdev->dev, "error IRQ, irq_sts: %x\n", irq_sts);
2944 spin_unlock(&dev->lock);
2946 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2947 return IRQ_HANDLED;
2951 /*-------------------------------------------------------------------------*/
2953 /* release device structure */
2954 static void gadget_release(struct device *_dev)
2956 struct langwell_udc *dev = dev_get_drvdata(_dev);
2958 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2960 complete(dev->done);
2962 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2963 kfree(dev);
2967 /* enable SRAM caching if SRAM detected */
2968 static void sram_init(struct langwell_udc *dev)
2970 struct pci_dev *pdev = dev->pdev;
2972 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2974 dev->sram_addr = pci_resource_start(pdev, 1);
2975 dev->sram_size = pci_resource_len(pdev, 1);
2976 dev_info(&dev->pdev->dev, "Found private SRAM at %x size:%x\n",
2977 dev->sram_addr, dev->sram_size);
2978 dev->got_sram = 1;
2980 if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
2981 dev_warn(&dev->pdev->dev, "SRAM request failed\n");
2982 dev->got_sram = 0;
2983 } else if (!dma_declare_coherent_memory(&pdev->dev, dev->sram_addr,
2984 dev->sram_addr, dev->sram_size, DMA_MEMORY_MAP)) {
2985 dev_warn(&dev->pdev->dev, "SRAM DMA declare failed\n");
2986 pci_release_region(pdev, 1);
2987 dev->got_sram = 0;
2990 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2994 /* release SRAM caching */
2995 static void sram_deinit(struct langwell_udc *dev)
2997 struct pci_dev *pdev = dev->pdev;
2999 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3001 dma_release_declared_memory(&pdev->dev);
3002 pci_release_region(pdev, 1);
3004 dev->got_sram = 0;
3006 dev_info(&dev->pdev->dev, "release SRAM caching\n");
3007 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3011 /* tear down the binding between this driver and the pci device */
3012 static void langwell_udc_remove(struct pci_dev *pdev)
3014 struct langwell_udc *dev = pci_get_drvdata(pdev);
3016 DECLARE_COMPLETION_ONSTACK(done);
3018 BUG_ON(dev->driver);
3019 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3021 dev->done = &done;
3023 #ifndef OTG_TRANSCEIVER
3024 /* free dTD dma_pool and dQH */
3025 if (dev->dtd_pool)
3026 dma_pool_destroy(dev->dtd_pool);
3028 if (dev->ep_dqh)
3029 dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
3030 dev->ep_dqh, dev->ep_dqh_dma);
3032 /* release SRAM caching */
3033 if (dev->has_sram && dev->got_sram)
3034 sram_deinit(dev);
3035 #endif
3037 if (dev->status_req) {
3038 kfree(dev->status_req->req.buf);
3039 kfree(dev->status_req);
3042 kfree(dev->ep);
3044 /* disable IRQ handler */
3045 if (dev->got_irq)
3046 free_irq(pdev->irq, dev);
3048 #ifndef OTG_TRANSCEIVER
3049 if (dev->cap_regs)
3050 iounmap(dev->cap_regs);
3052 if (dev->region)
3053 release_mem_region(pci_resource_start(pdev, 0),
3054 pci_resource_len(pdev, 0));
3056 if (dev->enabled)
3057 pci_disable_device(pdev);
3058 #else
3059 if (dev->transceiver) {
3060 otg_put_transceiver(dev->transceiver);
3061 dev->transceiver = NULL;
3062 dev->lotg = NULL;
3064 #endif
3066 dev->cap_regs = NULL;
3068 dev_info(&dev->pdev->dev, "unbind\n");
3069 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3071 device_unregister(&dev->gadget.dev);
3072 device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3073 device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
3075 #ifndef OTG_TRANSCEIVER
3076 pci_set_drvdata(pdev, NULL);
3077 #endif
3079 /* free dev, wait for the release() finished */
3080 wait_for_completion(&done);
3084 /*
3085  * wrap this driver around the specified device, but
3086  * don't respond over USB until a gadget driver binds to us.
3087  */
3088 static int langwell_udc_probe(struct pci_dev *pdev,
3089 const struct pci_device_id *id)
3091 struct langwell_udc *dev;
3092 #ifndef OTG_TRANSCEIVER
3093 unsigned long resource, len;
3094 #endif
3095 void __iomem *base = NULL;
3096 size_t size;
3097 int retval;
3099 /* alloc, and start init */
3100 dev = kzalloc(sizeof *dev, GFP_KERNEL);
3101 if (dev == NULL) {
3102 retval = -ENOMEM;
3103 goto error;
3106 /* initialize device spinlock */
3107 spin_lock_init(&dev->lock);
3109 dev->pdev = pdev;
3110 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3112 #ifdef OTG_TRANSCEIVER
3113 /* PCI device is already enabled by otg_transceiver driver */
3114 dev->enabled = 1;
3116 /* mem region and register base */
3117 dev->region = 1;
3118 dev->transceiver = otg_get_transceiver();
3119 dev->lotg = otg_to_langwell(dev->transceiver);
3120 base = dev->lotg->regs;
3121 #else
3122 pci_set_drvdata(pdev, dev);
3124 /* now all the pci goodies ... */
3125 if (pci_enable_device(pdev) < 0) {
3126 retval = -ENODEV;
3127 goto error;
3129 dev->enabled = 1;
3131 /* control register: BAR 0 */
3132 resource = pci_resource_start(pdev, 0);
3133 len = pci_resource_len(pdev, 0);
3134 if (!request_mem_region(resource, len, driver_name)) {
3135 dev_err(&dev->pdev->dev, "controller already in use\n");
3136 retval = -EBUSY;
3137 goto error;
3139 dev->region = 1;
3141 base = ioremap_nocache(resource, len);
3142 #endif
3143 if (base == NULL) {
3144 dev_err(&dev->pdev->dev, "can't map memory\n");
3145 retval = -EFAULT;
3146 goto error;
3149 dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
3150 dev_vdbg(&dev->pdev->dev, "dev->cap_regs: %p\n", dev->cap_regs);
3151 dev->op_regs = (struct langwell_op_regs __iomem *)
3152 (base + OP_REG_OFFSET);
3153 dev_vdbg(&dev->pdev->dev, "dev->op_regs: %p\n", dev->op_regs);
3155 /* irq setup after old hardware is cleaned up */
3156 if (!pdev->irq) {
3157 dev_err(&dev->pdev->dev, "No IRQ. Check PCI setup!\n");
3158 retval = -ENODEV;
3159 goto error;
3162 dev->has_sram = 1;
3163 dev->got_sram = 0;
3164 dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
3166 #ifndef OTG_TRANSCEIVER
3167 /* enable SRAM caching if detected */
3168 if (dev->has_sram && !dev->got_sram)
3169 sram_init(dev);
3171 dev_info(&dev->pdev->dev,
3172 "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
3173 pdev->irq, resource, len, base);
3174 /* enables bus-mastering for device dev */
3175 pci_set_master(pdev);
3177 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3178 driver_name, dev) != 0) {
3179 dev_err(&dev->pdev->dev,
3180 "request interrupt %d failed\n", pdev->irq);
3181 retval = -EBUSY;
3182 goto error;
3184 dev->got_irq = 1;
3185 #endif
3187 /* set stopped bit */
3188 dev->stopped = 1;
3190 /* capabilities and endpoint number */
3191 dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
3192 dev->dciversion = readw(&dev->cap_regs->dciversion);
3193 dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
3194 dev_vdbg(&dev->pdev->dev, "dev->lpm: %d\n", dev->lpm);
3195 dev_vdbg(&dev->pdev->dev, "dev->dciversion: 0x%04x\n",
3196 dev->dciversion);
3197 dev_vdbg(&dev->pdev->dev, "dccparams: 0x%08x\n",
3198 readl(&dev->cap_regs->dccparams));
3199 dev_vdbg(&dev->pdev->dev, "dev->devcap: %d\n", dev->devcap);
3200 if (!dev->devcap) {
3201 dev_err(&dev->pdev->dev, "can't support device mode\n");
3202 retval = -ENODEV;
3203 goto error;
3206 /* a pair of endpoints (out/in) for each address */
3207 dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
3208 dev_vdbg(&dev->pdev->dev, "dev->ep_max: %d\n", dev->ep_max);
3210 /* allocate endpoints memory */
3211 dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
3212 GFP_KERNEL);
3213 if (!dev->ep) {
3214 dev_err(&dev->pdev->dev, "allocate endpoints memory failed\n");
3215 retval = -ENOMEM;
3216 goto error;
3219 /* allocate device dQH memory */
3220 size = dev->ep_max * sizeof(struct langwell_dqh);
3221 dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
3222 if (size < DQH_ALIGNMENT)
3223 size = DQH_ALIGNMENT;
3224 else if ((size % DQH_ALIGNMENT) != 0) {
3225 size += DQH_ALIGNMENT + 1;
3226 size &= ~(DQH_ALIGNMENT - 1);
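/* pad the dQH allocation up to a whole DQH_ALIGNMENT multiple; the controller requires the endpoint queue-head list to be DQH_ALIGNMENT-aligned */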
3228 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3229 &dev->ep_dqh_dma, GFP_KERNEL);
3230 if (!dev->ep_dqh) {
3231 dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3232 retval = -ENOMEM;
3233 goto error;
3235 dev->ep_dqh_size = size;
3236 dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
3238 /* initialize ep0 status request structure */
3239 dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
3240 if (!dev->status_req) {
3241 dev_err(&dev->pdev->dev,
3242 "allocate status_req memory failed\n");
3243 retval = -ENOMEM;
3244 goto error;
3246 INIT_LIST_HEAD(&dev->status_req->queue);
3248 /* allocate a small amount of memory to get a valid buffer address */
3249 dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
3250 dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
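/* note: the DMA handle for this small ep0 buffer is taken with virt_to_phys() rather than the DMA mapping API */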
3252 dev->resume_state = USB_STATE_NOTATTACHED;
3253 dev->usb_state = USB_STATE_POWERED;
3254 dev->ep0_dir = USB_DIR_OUT;
3256 /* remote wakeup reset to 0 when the device is reset */
3257 dev->remote_wakeup = 0;
3258 dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
3260 #ifndef OTG_TRANSCEIVER
3261 /* reset device controller */
3262 langwell_udc_reset(dev);
3263 #endif
3265 /* initialize gadget structure */
3266 dev->gadget.ops = &langwell_ops; /* usb_gadget_ops */
3267 dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
3268 INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
3269 dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
3270 dev->gadget.is_dualspeed = 1; /* support dual speed */
3271 #ifdef OTG_TRANSCEIVER
3272 dev->gadget.is_otg = 1; /* support otg mode */
3273 #endif
3275 /* the "gadget" abstracts/virtualizes the controller */
3276 dev_set_name(&dev->gadget.dev, "gadget");
3277 dev->gadget.dev.parent = &pdev->dev;
3278 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3279 dev->gadget.dev.release = gadget_release;
3280 dev->gadget.name = driver_name; /* gadget name */
3282 /* controller endpoints reinit */
3283 eps_reinit(dev);
3285 #ifndef OTG_TRANSCEIVER
3286 /* reset ep0 dQH and endptctrl */
3287 ep0_reset(dev);
3288 #endif
3290 /* create dTD dma_pool resource */
3291 dev->dtd_pool = dma_pool_create("langwell_dtd",
3292 &dev->pdev->dev,
3293 sizeof(struct langwell_dtd),
3294 DTD_ALIGNMENT,
3295 DMA_BOUNDARY);
3297 if (!dev->dtd_pool) {
3298 retval = -ENOMEM;
3299 goto error;
3302 /* done */
3303 dev_info(&dev->pdev->dev, "%s\n", driver_desc);
3304 dev_info(&dev->pdev->dev, "irq %d, pci mem %p\n", pdev->irq, base);
3305 dev_info(&dev->pdev->dev, "Driver version: " DRIVER_VERSION "\n");
3306 dev_info(&dev->pdev->dev, "Support (max) %d endpoints\n", dev->ep_max);
3307 dev_info(&dev->pdev->dev, "Device interface version: 0x%04x\n",
3308 dev->dciversion);
3309 dev_info(&dev->pdev->dev, "Controller mode: %s\n",
3310 dev->devcap ? "Device" : "Host");
3311 dev_info(&dev->pdev->dev, "Support USB LPM: %s\n",
3312 dev->lpm ? "Yes" : "No");
3314 dev_vdbg(&dev->pdev->dev,
3315 "After langwell_udc_probe(), print all registers:\n");
3316 print_all_registers(dev);
3318 retval = device_register(&dev->gadget.dev);
3319 if (retval)
3320 goto error;
3322 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3323 if (retval)
3324 goto error;
3326 retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
3327 if (retval)
3328 goto error;
3330 retval = device_create_file(&pdev->dev, &dev_attr_remote_wakeup);
3331 if (retval)
3332 goto error_attr1;
3334 dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3335 return 0;
3337 error_attr1:
3338 device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3339 error:
3340 if (dev) {
3341 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3342 langwell_udc_remove(pdev);
3345 return retval;
3349 /* device controller suspend */
3350 static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3352 struct langwell_udc *dev = pci_get_drvdata(pdev);
3354 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3356 usb_del_gadget_udc(&dev->gadget);
3357 /* disable interrupt and set controller to stop state */
3358 langwell_udc_stop(dev);
3360 /* disable IRQ handler */
3361 if (dev->got_irq)
3362 free_irq(pdev->irq, dev);
3363 dev->got_irq = 0;
3365 /* save PCI state */
3366 pci_save_state(pdev);
3368 spin_lock_irq(&dev->lock);
3369 /* stop all usb activities */
3370 stop_activity(dev, dev->driver);
3371 spin_unlock_irq(&dev->lock);
3373 /* free dTD dma_pool and dQH */
3374 if (dev->dtd_pool)
3375 dma_pool_destroy(dev->dtd_pool);
3377 if (dev->ep_dqh)
3378 dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
3379 dev->ep_dqh, dev->ep_dqh_dma);
3381 /* release SRAM caching */
3382 if (dev->has_sram && dev->got_sram)
3383 sram_deinit(dev);
3385 /* set device power state */
3386 pci_set_power_state(pdev, PCI_D3hot);
3388 /* enter PHY low power suspend */
3389 if (dev->pdev->device != 0x0829)
3390 langwell_phy_low_power(dev, 1);
3392 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3393 return 0;
3397 /* device controller resume */
3398 static int langwell_udc_resume(struct pci_dev *pdev)
3400 struct langwell_udc *dev = pci_get_drvdata(pdev);
3401 size_t size;
3403 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3405 /* exit PHY low power suspend */
3406 if (dev->pdev->device != 0x0829)
3407 langwell_phy_low_power(dev, 0);
3409 /* set device D0 power state */
3410 pci_set_power_state(pdev, PCI_D0);
3412 /* enable SRAM caching if detected */
3413 if (dev->has_sram && !dev->got_sram)
3414 sram_init(dev);
3416 /* allocate device dQH memory */
3417 size = dev->ep_max * sizeof(struct langwell_dqh);
3418 dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
3419 if (size < DQH_ALIGNMENT)
3420 size = DQH_ALIGNMENT;
3421 else if ((size % DQH_ALIGNMENT) != 0) {
3422 size += DQH_ALIGNMENT + 1;
3423 size &= ~(DQH_ALIGNMENT - 1);
3425 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3426 &dev->ep_dqh_dma, GFP_KERNEL);
3427 if (!dev->ep_dqh) {
3428 dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3429 return -ENOMEM;
3431 dev->ep_dqh_size = size;
3432 dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
3434 /* create dTD dma_pool resource */
3435 dev->dtd_pool = dma_pool_create("langwell_dtd",
3436 &dev->pdev->dev,
3437 sizeof(struct langwell_dtd),
3438 DTD_ALIGNMENT,
3439 DMA_BOUNDARY);
3441 if (!dev->dtd_pool)
3442 return -ENOMEM;
3444 /* restore PCI state */
3445 pci_restore_state(pdev);
3447 /* enable IRQ handler */
3448 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3449 driver_name, dev) != 0) {
3450 dev_err(&dev->pdev->dev, "request interrupt %d failed\n",
3451 pdev->irq);
3452 return -EBUSY;
3454 dev->got_irq = 1;
3456 /* reset and start controller to run state */
3457 if (dev->stopped) {
3458 /* reset device controller */
3459 langwell_udc_reset(dev);
3461 /* reset ep0 dQH and endptctrl */
3462 ep0_reset(dev);
3464 /* start device if gadget is loaded */
3465 if (dev->driver)
3466 langwell_udc_start(dev);
3469 /* reset USB status */
3470 dev->usb_state = USB_STATE_ATTACHED;
3471 dev->ep0_state = WAIT_FOR_SETUP;
3472 dev->ep0_dir = USB_DIR_OUT;
3474 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3475 return 0;
3479 /* pci driver shutdown */
3480 static void langwell_udc_shutdown(struct pci_dev *pdev)
3482 struct langwell_udc *dev = pci_get_drvdata(pdev);
3483 u32 usbmode;
3485 dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3487 /* reset controller mode to IDLE */
3488 usbmode = readl(&dev->op_regs->usbmode);
3489 dev_dbg(&dev->pdev->dev, "usbmode = 0x%08x\n", usbmode);
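/* the controller-mode field occupies the two low bits of usbmode; clearing them returns the controller to idle (MODE_IDLE is assumed to be 0, so OR-ing it in is a no-op) */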
3490 usbmode &= (~3 | MODE_IDLE);
3491 writel(usbmode, &dev->op_regs->usbmode);
3493 dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3496 /*-------------------------------------------------------------------------*/
3498 static const struct pci_device_id pci_ids[] = { {
3499 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3500 .class_mask = ~0,
3501 .vendor = 0x8086,
3502 .device = 0x0811,
3503 .subvendor = PCI_ANY_ID,
3504 .subdevice = PCI_ANY_ID,
3505 }, { /* end: all zeroes */ }
3508 MODULE_DEVICE_TABLE(pci, pci_ids);
3511 static struct pci_driver langwell_pci_driver = {
3512 .name = (char *) driver_name,
3513 .id_table = pci_ids,
3515 .probe = langwell_udc_probe,
3516 .remove = langwell_udc_remove,
3518 /* device controller suspend/resume */
3519 .suspend = langwell_udc_suspend,
3520 .resume = langwell_udc_resume,
3522 .shutdown = langwell_udc_shutdown,
3526 static int __init init(void)
3528 #ifdef OTG_TRANSCEIVER
3529 return langwell_register_peripheral(&langwell_pci_driver);
3530 #else
3531 return pci_register_driver(&langwell_pci_driver);
3532 #endif
3534 module_init(init);
3537 static void __exit cleanup(void)
3539 #ifdef OTG_TRANSCEIVER
3540 langwell_unregister_peripheral(&langwell_pci_driver);
3541 #else
3542 pci_unregister_driver(&langwell_pci_driver);
3543 #endif
3545 module_exit(cleanup);
3548 MODULE_DESCRIPTION(DRIVER_DESC);
3549 MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
3550 MODULE_VERSION(DRIVER_VERSION);
3551 MODULE_LICENSE("GPL");