1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver
5 * Copyright (C) 2000-2002 Lineo
6 * by Stuart Lynne, Tom Rushworth, and Bruce Balden
7 * Copyright (C) 2002 Toshiba Corporation
8 * Copyright (C) 2003 MontaVista Software (source@mvista.com)
9 */
/*
 * This device has ep0 and three semi-configurable bulk/interrupt endpoints.
 *
 *  - Endpoint numbering is fixed: ep{1,2,3}-bulk
 *  - Gadget drivers can choose ep maxpacket (8/16/32/64)
 *  - Gadget drivers can choose direction (IN, OUT)
 *  - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
 */
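/*
 * Illustration only (not from the original source): an endpoint descriptor
 * that fits the constraints above -- bulk, IN, wMaxPacketSize of 8/16/32/64 --
 * could look like the sketch below; the address and packet size are example
 * values, and the descriptor name is a placeholder.
 */
#if 0
static struct usb_endpoint_descriptor example_bulk_in_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	/* ep2 is the endpoint with IN dma support on this controller */
	.bEndpointAddress	= USB_DIR_IN | 2,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize		= cpu_to_le16(64),
};
#endif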
20 // #define VERBOSE /* extra debug messages (success too) */
21 // #define USB_TRACE /* packet-level success messages */
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include <linux/ioport.h>
28 #include <linux/slab.h>
29 #include <linux/errno.h>
30 #include <linux/timer.h>
31 #include <linux/list.h>
32 #include <linux/interrupt.h>
33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h>
35 #include <linux/device.h>
36 #include <linux/usb/ch9.h>
37 #include <linux/usb/gadget.h>
38 #include <linux/prefetch.h>
40 #include <asm/byteorder.h>
41 #include <asm/io.h>
42 #include <asm/irq.h>
43 #include <asm/unaligned.h>
46 #include "goku_udc.h"
48 #define DRIVER_DESC "TC86C001 USB Device Controller"
49 #define DRIVER_VERSION "30-Oct 2003"
51 static const char driver_name [] = "goku_udc";
52 static const char driver_desc [] = DRIVER_DESC;
54 MODULE_AUTHOR("source@mvista.com");
55 MODULE_DESCRIPTION(DRIVER_DESC);
56 MODULE_LICENSE("GPL");
/*
 * IN dma behaves ok under testing, though the IN-dma abort paths don't
 * seem to behave quite as expected.  Used by default.
 *
 * OUT dma documents design problems handling the common "short packet"
 * transfer termination policy; it couldn't be enabled by default, even
 * if the OUT-dma abort problems had a resolution.
 */
67 static unsigned use_dma = 1;
#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
 *	0 to disable dma
 *	1 to use IN dma only (normal operation)
 *	2 to use IN and OUT dma
 */
module_param(use_dma, uint, S_IRUGO);
#endif
79 /*-------------------------------------------------------------------------*/
81 static void nuke(struct goku_ep *, int status);
83 static inline void
84 command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
86 writel(COMMAND_EP(epnum) | command, &regs->Command);
87 udelay(300);
90 static int
91 goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
93 struct goku_udc *dev;
94 struct goku_ep *ep;
95 u32 mode;
96 u16 max;
97 unsigned long flags;
99 ep = container_of(_ep, struct goku_ep, ep);
100 if (!_ep || !desc
101 || desc->bDescriptorType != USB_DT_ENDPOINT)
102 return -EINVAL;
103 dev = ep->dev;
104 if (ep == &dev->ep[0])
105 return -EINVAL;
106 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
107 return -ESHUTDOWN;
108 if (ep->num != usb_endpoint_num(desc))
109 return -EINVAL;
111 switch (usb_endpoint_type(desc)) {
112 case USB_ENDPOINT_XFER_BULK:
113 case USB_ENDPOINT_XFER_INT:
114 break;
115 default:
116 return -EINVAL;
119 if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
120 != EPxSTATUS_EP_INVALID)
121 return -EBUSY;
123 /* enabling the no-toggle interrupt mode would need an api hook */
124 mode = 0;
125 max = get_unaligned_le16(&desc->wMaxPacketSize);
126 switch (max) {
127 case 64:
128 mode++; /* fall through */
129 case 32:
130 mode++; /* fall through */
131 case 16:
132 mode++; /* fall through */
133 case 8:
134 mode <<= 3;
135 break;
136 default:
137 return -EINVAL;
139 mode |= 2 << 1; /* bulk, or intr-with-toggle */
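	/* after the switch above, mode bits 4:3 hold the maxpacket code
	 * (maxpacket == 8 << code) and bits 2:1 hold 2 for bulk/interrupt
	 * with data toggle; bit 0 (the IN direction flag) is set just below.
	 */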
141 /* ep1/ep2 dma direction is chosen early; it works in the other
142 * direction, with pio. be cautious with out-dma.
144 ep->is_in = usb_endpoint_dir_in(desc);
145 if (ep->is_in) {
146 mode |= 1;
147 ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
148 } else {
149 ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
150 if (ep->dma)
151 DBG(dev, "%s out-dma hides short packets\n",
152 ep->ep.name);
155 spin_lock_irqsave(&ep->dev->lock, flags);
157 /* ep1 and ep2 can do double buffering and/or dma */
158 if (ep->num < 3) {
159 struct goku_udc_regs __iomem *regs = ep->dev->regs;
160 u32 tmp;
162 /* double buffer except (for now) with pio in */
163 tmp = ((ep->dma || !ep->is_in)
164 ? 0x10 /* double buffered */
165 : 0x11 /* single buffer */
166 ) << ep->num;
167 tmp |= readl(&regs->EPxSingle);
168 writel(tmp, &regs->EPxSingle);
170 tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
171 tmp |= readl(&regs->EPxBCS);
172 writel(tmp, &regs->EPxBCS);
174 writel(mode, ep->reg_mode);
175 command(ep->dev->regs, COMMAND_RESET, ep->num);
176 ep->ep.maxpacket = max;
177 ep->stopped = 0;
178 ep->ep.desc = desc;
179 spin_unlock_irqrestore(&ep->dev->lock, flags);
181 DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
182 ep->is_in ? "IN" : "OUT",
183 ep->dma ? "dma" : "pio",
184 max);
186 return 0;
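/*
 * Illustration only (not from the original source): a gadget driver reaches
 * goku_ep_enable() through the usb_ep_enable() wrapper after pointing
 * ep->desc at a suitable descriptor, roughly like this hypothetical helper.
 */
#if 0
static int example_enable(struct usb_ep *ep,
		struct usb_endpoint_descriptor *desc)
{
	ep->desc = desc;
	return usb_ep_enable(ep);	/* calls goku_ep_enable(ep, desc) */
}
#endif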
189 static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
191 struct goku_udc *dev = ep->dev;
193 if (regs) {
194 command(regs, COMMAND_INVALID, ep->num);
195 if (ep->num) {
196 if (ep->num == UDC_MSTWR_ENDPOINT)
197 dev->int_enable &= ~(INT_MSTWREND
198 |INT_MSTWRTMOUT);
199 else if (ep->num == UDC_MSTRD_ENDPOINT)
200 dev->int_enable &= ~INT_MSTRDEND;
201 dev->int_enable &= ~INT_EPxDATASET (ep->num);
202 } else
203 dev->int_enable &= ~INT_EP0;
204 writel(dev->int_enable, &regs->int_enable);
205 readl(&regs->int_enable);
206 if (ep->num < 3) {
207 struct goku_udc_regs __iomem *r = ep->dev->regs;
208 u32 tmp;
210 tmp = readl(&r->EPxSingle);
211 tmp &= ~(0x11 << ep->num);
212 writel(tmp, &r->EPxSingle);
214 tmp = readl(&r->EPxBCS);
215 tmp &= ~(0x11 << ep->num);
216 writel(tmp, &r->EPxBCS);
218 /* reset dma in case we're still using it */
219 if (ep->dma) {
220 u32 master;
222 master = readl(&regs->dma_master) & MST_RW_BITS;
223 if (ep->num == UDC_MSTWR_ENDPOINT) {
224 master &= ~MST_W_BITS;
225 master |= MST_WR_RESET;
226 } else {
227 master &= ~MST_R_BITS;
228 master |= MST_RD_RESET;
230 writel(master, &regs->dma_master);
234 usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE);
235 ep->ep.desc = NULL;
236 ep->stopped = 1;
237 ep->irqs = 0;
238 ep->dma = 0;
241 static int goku_ep_disable(struct usb_ep *_ep)
243 struct goku_ep *ep;
244 struct goku_udc *dev;
245 unsigned long flags;
247 ep = container_of(_ep, struct goku_ep, ep);
248 if (!_ep || !ep->ep.desc)
249 return -ENODEV;
250 dev = ep->dev;
251 if (dev->ep0state == EP0_SUSPEND)
252 return -EBUSY;
254 VDBG(dev, "disable %s\n", _ep->name);
256 spin_lock_irqsave(&dev->lock, flags);
257 nuke(ep, -ESHUTDOWN);
258 ep_reset(dev->regs, ep);
259 spin_unlock_irqrestore(&dev->lock, flags);
261 return 0;
264 /*-------------------------------------------------------------------------*/
266 static struct usb_request *
267 goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
269 struct goku_request *req;
271 if (!_ep)
272 return NULL;
273 req = kzalloc(sizeof *req, gfp_flags);
274 if (!req)
275 return NULL;
277 INIT_LIST_HEAD(&req->queue);
278 return &req->req;
281 static void
282 goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
284 struct goku_request *req;
286 if (!_ep || !_req)
287 return;
289 req = container_of(_req, struct goku_request, req);
290 WARN_ON(!list_empty(&req->queue));
291 kfree(req);
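/*
 * Illustration only (not from the original source): gadget drivers reach the
 * two ops above through usb_ep_alloc_request()/usb_ep_free_request(); a
 * hypothetical helper might look like this.
 */
#if 0
static struct usb_request *example_alloc_req(struct usb_ep *ep, unsigned len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);	/* -> goku_alloc_request */
	if (!req)
		return NULL;
	req->length = len;
	req->buf = kmalloc(len, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);		/* -> goku_free_request */
		return NULL;
	}
	return req;
}
#endif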
294 /*-------------------------------------------------------------------------*/
296 static void
297 done(struct goku_ep *ep, struct goku_request *req, int status)
299 struct goku_udc *dev;
300 unsigned stopped = ep->stopped;
302 list_del_init(&req->queue);
304 if (likely(req->req.status == -EINPROGRESS))
305 req->req.status = status;
306 else
307 status = req->req.status;
309 dev = ep->dev;
311 if (ep->dma)
312 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
314 #ifndef USB_TRACE
315 if (status && status != -ESHUTDOWN)
316 #endif
317 VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
318 ep->ep.name, &req->req, status,
319 req->req.actual, req->req.length);
321 /* don't modify queue heads during completion callback */
322 ep->stopped = 1;
323 spin_unlock(&dev->lock);
324 usb_gadget_giveback_request(&ep->ep, &req->req);
325 spin_lock(&dev->lock);
326 ep->stopped = stopped;
329 /*-------------------------------------------------------------------------*/
331 static inline int
332 write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
334 unsigned length, count;
336 length = min(req->req.length - req->req.actual, max);
337 req->req.actual += length;
339 count = length;
340 while (likely(count--))
341 writel(*buf++, fifo);
342 return length;
345 // return: 0 = still running, 1 = completed, negative = errno
346 static int write_fifo(struct goku_ep *ep, struct goku_request *req)
348 struct goku_udc *dev = ep->dev;
349 u32 tmp;
350 u8 *buf;
351 unsigned count;
352 int is_last;
354 tmp = readl(&dev->regs->DataSet);
355 buf = req->req.buf + req->req.actual;
356 prefetch(buf);
358 dev = ep->dev;
359 if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
360 return -EL2HLT;
362 /* NOTE: just single-buffered PIO-IN for now. */
363 if (unlikely((tmp & DATASET_A(ep->num)) != 0))
364 return 0;
366 /* clear our "packet available" irq */
367 if (ep->num != 0)
368 writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
370 count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
372 /* last packet often short (sometimes a zlp, especially on ep0) */
373 if (unlikely(count != ep->ep.maxpacket)) {
374 writel(~(1<<ep->num), &dev->regs->EOP);
375 if (ep->num == 0) {
376 dev->ep[0].stopped = 1;
377 dev->ep0state = EP0_STATUS;
379 is_last = 1;
380 } else {
381 if (likely(req->req.length != req->req.actual)
382 || req->req.zero)
383 is_last = 0;
384 else
385 is_last = 1;
387 #if 0 /* printk seemed to trash is_last...*/
388 //#ifdef USB_TRACE
389 VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
390 ep->ep.name, count, is_last ? "/last" : "",
391 req->req.length - req->req.actual, req);
392 #endif
394 /* requests complete when all IN data is in the FIFO,
395 * or sometimes later, if a zlp was needed.
397 if (is_last) {
398 done(ep, req, 0);
399 return 1;
402 return 0;
405 static int read_fifo(struct goku_ep *ep, struct goku_request *req)
407 struct goku_udc_regs __iomem *regs;
408 u32 size, set;
409 u8 *buf;
410 unsigned bufferspace, is_short, dbuff;
412 regs = ep->dev->regs;
413 top:
414 buf = req->req.buf + req->req.actual;
415 prefetchw(buf);
417 if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
418 return -EL2HLT;
420 dbuff = (ep->num == 1 || ep->num == 2);
421 do {
422 /* ack dataset irq matching the status we'll handle */
423 if (ep->num != 0)
424 writel(~INT_EPxDATASET(ep->num), &regs->int_status);
426 set = readl(&regs->DataSet) & DATASET_AB(ep->num);
427 size = readl(&regs->EPxSizeLA[ep->num]);
428 bufferspace = req->req.length - req->req.actual;
430 /* usually do nothing without an OUT packet */
431 if (likely(ep->num != 0 || bufferspace != 0)) {
432 if (unlikely(set == 0))
433 break;
434 /* use ep1/ep2 double-buffering for OUT */
435 if (!(size & PACKET_ACTIVE))
436 size = readl(&regs->EPxSizeLB[ep->num]);
437 if (!(size & PACKET_ACTIVE)) /* "can't happen" */
438 break;
439 size &= DATASIZE; /* EPxSizeH == 0 */
441 /* ep0out no-out-data case for set_config, etc */
442 } else
443 size = 0;
445 /* read all bytes from this packet */
446 req->req.actual += size;
447 is_short = (size < ep->ep.maxpacket);
448 #ifdef USB_TRACE
449 VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
450 ep->ep.name, size, is_short ? "/S" : "",
451 req, req->req.actual, req->req.length);
452 #endif
453 while (likely(size-- != 0)) {
454 u8 byte = (u8) readl(ep->reg_fifo);
456 if (unlikely(bufferspace == 0)) {
457 /* this happens when the driver's buffer
458 * is smaller than what the host sent.
459 * discard the extra data in this packet.
461 if (req->req.status != -EOVERFLOW)
462 DBG(ep->dev, "%s overflow %u\n",
463 ep->ep.name, size);
464 req->req.status = -EOVERFLOW;
465 } else {
466 *buf++ = byte;
467 bufferspace--;
471 /* completion */
472 if (unlikely(is_short || req->req.actual == req->req.length)) {
473 if (unlikely(ep->num == 0)) {
474 /* non-control endpoints now usable? */
475 if (ep->dev->req_config)
476 writel(ep->dev->configured
477 ? USBSTATE_CONFIGURED
478 : 0,
479 &regs->UsbState);
480 /* ep0out status stage */
481 writel(~(1<<0), &regs->EOP);
482 ep->stopped = 1;
483 ep->dev->ep0state = EP0_STATUS;
485 done(ep, req, 0);
487 /* empty the second buffer asap */
488 if (dbuff && !list_empty(&ep->queue)) {
489 req = list_entry(ep->queue.next,
490 struct goku_request, queue);
491 goto top;
493 return 1;
495 } while (dbuff);
496 return 0;
499 static inline void
500 pio_irq_enable(struct goku_udc *dev,
501 struct goku_udc_regs __iomem *regs, int epnum)
503 dev->int_enable |= INT_EPxDATASET (epnum);
504 writel(dev->int_enable, &regs->int_enable);
505 /* write may still be posted */
508 static inline void
509 pio_irq_disable(struct goku_udc *dev,
510 struct goku_udc_regs __iomem *regs, int epnum)
512 dev->int_enable &= ~INT_EPxDATASET (epnum);
513 writel(dev->int_enable, &regs->int_enable);
514 /* write may still be posted */
517 static inline void
518 pio_advance(struct goku_ep *ep)
520 struct goku_request *req;
522 if (unlikely(list_empty (&ep->queue)))
523 return;
524 req = list_entry(ep->queue.next, struct goku_request, queue);
525 (ep->is_in ? write_fifo : read_fifo)(ep, req);
529 /*-------------------------------------------------------------------------*/
531 // return: 0 = q running, 1 = q stopped, negative = errno
532 static int start_dma(struct goku_ep *ep, struct goku_request *req)
534 struct goku_udc_regs __iomem *regs = ep->dev->regs;
535 u32 master;
536 u32 start = req->req.dma;
537 u32 end = start + req->req.length - 1;
539 master = readl(&regs->dma_master) & MST_RW_BITS;
541 /* re-init the bits affecting IN dma; careful with zlps */
542 if (likely(ep->is_in)) {
543 if (unlikely(master & MST_RD_ENA)) {
544 DBG (ep->dev, "start, IN active dma %03x!!\n",
545 master);
546 // return -EL2HLT;
548 writel(end, &regs->in_dma_end);
549 writel(start, &regs->in_dma_start);
551 master &= ~MST_R_BITS;
552 if (unlikely(req->req.length == 0))
553 master = MST_RD_ENA | MST_RD_EOPB;
554 else if ((req->req.length % ep->ep.maxpacket) != 0
555 || req->req.zero)
556 master = MST_RD_ENA | MST_EOPB_ENA;
557 else
558 master = MST_RD_ENA | MST_EOPB_DIS;
560 ep->dev->int_enable |= INT_MSTRDEND;
562 /* Goku DMA-OUT merges short packets, which plays poorly with
563 * protocols where short packets mark the transfer boundaries.
564 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
565 * ending transfers after 3 SOFs; we don't turn it on.
567 } else {
568 if (unlikely(master & MST_WR_ENA)) {
569 DBG (ep->dev, "start, OUT active dma %03x!!\n",
570 master);
571 // return -EL2HLT;
573 writel(end, &regs->out_dma_end);
574 writel(start, &regs->out_dma_start);
576 master &= ~MST_W_BITS;
577 master |= MST_WR_ENA | MST_TIMEOUT_DIS;
579 ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
582 writel(master, &regs->dma_master);
583 writel(ep->dev->int_enable, &regs->int_enable);
584 return 0;
587 static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
589 struct goku_request *req;
590 struct goku_udc_regs __iomem *regs = ep->dev->regs;
591 u32 master;
593 master = readl(&regs->dma_master);
595 if (unlikely(list_empty(&ep->queue))) {
596 stop:
597 if (ep->is_in)
598 dev->int_enable &= ~INT_MSTRDEND;
599 else
600 dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
601 writel(dev->int_enable, &regs->int_enable);
602 return;
604 req = list_entry(ep->queue.next, struct goku_request, queue);
606 /* normal hw dma completion (not abort) */
607 if (likely(ep->is_in)) {
608 if (unlikely(master & MST_RD_ENA))
609 return;
610 req->req.actual = readl(&regs->in_dma_current);
611 } else {
612 if (unlikely(master & MST_WR_ENA))
613 return;
615 /* hardware merges short packets, and also hides packet
616 * overruns. a partial packet MAY be in the fifo here.
618 req->req.actual = readl(&regs->out_dma_current);
620 req->req.actual -= req->req.dma;
621 req->req.actual++;
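	/* the dma_current register apparently holds the address of the last
	 * byte moved (matching "end = start + length - 1" in start_dma), so
	 * the subtraction and increment above yield a byte count.
	 */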
623 #ifdef USB_TRACE
624 VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
625 ep->ep.name, ep->is_in ? "IN" : "OUT",
626 req->req.actual, req->req.length, req);
627 #endif
628 done(ep, req, 0);
629 if (list_empty(&ep->queue))
630 goto stop;
631 req = list_entry(ep->queue.next, struct goku_request, queue);
632 (void) start_dma(ep, req);
635 static void abort_dma(struct goku_ep *ep, int status)
637 struct goku_udc_regs __iomem *regs = ep->dev->regs;
638 struct goku_request *req;
639 u32 curr, master;
641 /* NAK future host requests, hoping the implicit delay lets the
642 * dma engine finish reading (or writing) its latest packet and
643 * empty the dma buffer (up to 16 bytes).
645 * This avoids needing to clean up a partial packet in the fifo;
646 * we can't do that for IN without side effects to HALT and TOGGLE.
648 command(regs, COMMAND_FIFO_DISABLE, ep->num);
649 req = list_entry(ep->queue.next, struct goku_request, queue);
650 master = readl(&regs->dma_master) & MST_RW_BITS;
652 /* FIXME using these resets isn't usably documented. this may
653 * not work unless it's followed by disabling the endpoint.
655 * FIXME the OUT reset path doesn't even behave consistently.
657 if (ep->is_in) {
658 if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
659 goto finished;
660 curr = readl(&regs->in_dma_current);
662 writel(curr, &regs->in_dma_end);
663 writel(curr, &regs->in_dma_start);
665 master &= ~MST_R_BITS;
666 master |= MST_RD_RESET;
667 writel(master, &regs->dma_master);
669 if (readl(&regs->dma_master) & MST_RD_ENA)
670 DBG(ep->dev, "IN dma active after reset!\n");
672 } else {
673 if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
674 goto finished;
675 curr = readl(&regs->out_dma_current);
677 writel(curr, &regs->out_dma_end);
678 writel(curr, &regs->out_dma_start);
680 master &= ~MST_W_BITS;
681 master |= MST_WR_RESET;
682 writel(master, &regs->dma_master);
684 if (readl(&regs->dma_master) & MST_WR_ENA)
685 DBG(ep->dev, "OUT dma active after reset!\n");
687 req->req.actual = (curr - req->req.dma) + 1;
688 req->req.status = status;
690 VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
691 ep->is_in ? "IN" : "OUT",
692 req->req.actual, req->req.length);
694 command(regs, COMMAND_FIFO_ENABLE, ep->num);
696 return;
698 finished:
699 /* dma already completed; no abort needed */
700 command(regs, COMMAND_FIFO_ENABLE, ep->num);
701 req->req.actual = req->req.length;
702 req->req.status = 0;
705 /*-------------------------------------------------------------------------*/
707 static int
708 goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
710 struct goku_request *req;
711 struct goku_ep *ep;
712 struct goku_udc *dev;
713 unsigned long flags;
714 int status;
716 /* always require a cpu-view buffer so pio works */
717 req = container_of(_req, struct goku_request, req);
718 if (unlikely(!_req || !_req->complete
719 || !_req->buf || !list_empty(&req->queue)))
720 return -EINVAL;
721 ep = container_of(_ep, struct goku_ep, ep);
722 if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0)))
723 return -EINVAL;
724 dev = ep->dev;
725 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
726 return -ESHUTDOWN;
728 /* can't touch registers when suspended */
729 if (dev->ep0state == EP0_SUSPEND)
730 return -EBUSY;
732 /* set up dma mapping in case the caller didn't */
733 if (ep->dma) {
734 status = usb_gadget_map_request(&dev->gadget, &req->req,
735 ep->is_in);
736 if (status)
737 return status;
740 #ifdef USB_TRACE
741 VDBG(dev, "%s queue req %p, len %u buf %p\n",
742 _ep->name, _req, _req->length, _req->buf);
743 #endif
745 spin_lock_irqsave(&dev->lock, flags);
747 _req->status = -EINPROGRESS;
748 _req->actual = 0;
750 /* for ep0 IN without premature status, zlp is required and
751 * writing EOP starts the status stage (OUT).
753 if (unlikely(ep->num == 0 && ep->is_in))
754 _req->zero = 1;
756 /* kickstart this i/o queue? */
757 status = 0;
758 if (list_empty(&ep->queue) && likely(!ep->stopped)) {
759 /* dma: done after dma completion IRQ (or error)
760 * pio: done after last fifo operation
762 if (ep->dma)
763 status = start_dma(ep, req);
764 else
765 status = (ep->is_in ? write_fifo : read_fifo)(ep, req);
767 if (unlikely(status != 0)) {
768 if (status > 0)
769 status = 0;
770 req = NULL;
773 } /* else pio or dma irq handler advances the queue. */
775 if (likely(req != NULL))
776 list_add_tail(&req->queue, &ep->queue);
778 if (likely(!list_empty(&ep->queue))
779 && likely(ep->num != 0)
780 && !ep->dma
781 && !(dev->int_enable & INT_EPxDATASET (ep->num)))
782 pio_irq_enable(dev, dev->regs, ep->num);
784 spin_unlock_irqrestore(&dev->lock, flags);
786 /* pci writes may still be posted */
787 return status;
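/*
 * Illustration only (not from the original source): a request is submitted
 * to goku_queue() via usb_ep_queue(), with a completion callback that runs
 * from done(); the helper and callback names here are placeholders.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status)
		pr_debug("%s: ended with status %d\n", ep->name, req->status);
	/* req->actual bytes were transferred */
}

static int example_submit(struct usb_ep *ep, struct usb_request *req)
{
	req->complete = example_complete;
	req->zero = 1;	/* ask for a zlp after an exact-multiple IN transfer */
	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> goku_queue */
}
#endif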
790 /* dequeue ALL requests */
791 static void nuke(struct goku_ep *ep, int status)
793 struct goku_request *req;
795 ep->stopped = 1;
796 if (list_empty(&ep->queue))
797 return;
798 if (ep->dma)
799 abort_dma(ep, status);
800 while (!list_empty(&ep->queue)) {
801 req = list_entry(ep->queue.next, struct goku_request, queue);
802 done(ep, req, status);
806 /* dequeue JUST ONE request */
807 static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
809 struct goku_request *req;
810 struct goku_ep *ep;
811 struct goku_udc *dev;
812 unsigned long flags;
814 ep = container_of(_ep, struct goku_ep, ep);
815 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
816 return -EINVAL;
817 dev = ep->dev;
818 if (!dev->driver)
819 return -ESHUTDOWN;
821 /* we can't touch (dma) registers when suspended */
822 if (dev->ep0state == EP0_SUSPEND)
823 return -EBUSY;
825 VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
826 ep->is_in ? "IN" : "OUT",
827 ep->dma ? "dma" : "pio",
828 _req);
830 spin_lock_irqsave(&dev->lock, flags);
832 /* make sure it's actually queued on this endpoint */
833 list_for_each_entry (req, &ep->queue, queue) {
834 if (&req->req == _req)
835 break;
837 if (&req->req != _req) {
838 spin_unlock_irqrestore (&dev->lock, flags);
839 return -EINVAL;
842 if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
843 abort_dma(ep, -ECONNRESET);
844 done(ep, req, -ECONNRESET);
845 dma_advance(dev, ep);
846 } else if (!list_empty(&req->queue))
847 done(ep, req, -ECONNRESET);
848 else
849 req = NULL;
850 spin_unlock_irqrestore(&dev->lock, flags);
852 return req ? 0 : -EOPNOTSUPP;
855 /*-------------------------------------------------------------------------*/
857 static void goku_clear_halt(struct goku_ep *ep)
859 // assert (ep->num !=0)
860 VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
861 command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
862 command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
863 if (ep->stopped) {
864 ep->stopped = 0;
865 if (ep->dma) {
866 struct goku_request *req;
868 if (list_empty(&ep->queue))
869 return;
870 req = list_entry(ep->queue.next, struct goku_request,
871 queue);
872 (void) start_dma(ep, req);
873 } else
874 pio_advance(ep);
878 static int goku_set_halt(struct usb_ep *_ep, int value)
880 struct goku_ep *ep;
881 unsigned long flags;
882 int retval = 0;
884 if (!_ep)
885 return -ENODEV;
886 ep = container_of (_ep, struct goku_ep, ep);
888 if (ep->num == 0) {
889 if (value) {
890 ep->dev->ep0state = EP0_STALL;
891 ep->dev->ep[0].stopped = 1;
892 } else
893 return -EINVAL;
895 /* don't change EPxSTATUS_EP_INVALID to READY */
896 } else if (!ep->ep.desc) {
897 DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
898 return -EINVAL;
901 spin_lock_irqsave(&ep->dev->lock, flags);
902 if (!list_empty(&ep->queue))
903 retval = -EAGAIN;
904 else if (ep->is_in && value
905 /* data in (either) packet buffer? */
906 && (readl(&ep->dev->regs->DataSet)
907 & DATASET_AB(ep->num)))
908 retval = -EAGAIN;
909 else if (!value)
910 goku_clear_halt(ep);
911 else {
912 ep->stopped = 1;
913 VDBG(ep->dev, "%s set halt\n", ep->ep.name);
914 command(ep->dev->regs, COMMAND_STALL, ep->num);
915 readl(ep->reg_status);
917 spin_unlock_irqrestore(&ep->dev->lock, flags);
918 return retval;
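/*
 * Illustration only (not from the original source): gadget drivers halt and
 * un-halt an endpoint through usb_ep_set_halt()/usb_ep_clear_halt(), which
 * land in goku_set_halt() above.
 */
#if 0
static void example_stall_recover(struct usb_ep *ep)
{
	if (usb_ep_set_halt(ep) == -EAGAIN)	/* fifo or queue not empty */
		pr_debug("%s: can't halt yet\n", ep->name);

	/* later, once the driver decides to resume traffic */
	usb_ep_clear_halt(ep);			/* -> goku_clear_halt */
}
#endif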
921 static int goku_fifo_status(struct usb_ep *_ep)
923 struct goku_ep *ep;
924 struct goku_udc_regs __iomem *regs;
925 u32 size;
927 if (!_ep)
928 return -ENODEV;
929 ep = container_of(_ep, struct goku_ep, ep);
931 /* size is only reported sanely for OUT */
932 if (ep->is_in)
933 return -EOPNOTSUPP;
935 /* ignores 16-byte dma buffer; SizeH == 0 */
936 regs = ep->dev->regs;
937 size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
938 size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
939 VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
940 return size;
943 static void goku_fifo_flush(struct usb_ep *_ep)
945 struct goku_ep *ep;
946 struct goku_udc_regs __iomem *regs;
947 u32 size;
949 if (!_ep)
950 return;
951 ep = container_of(_ep, struct goku_ep, ep);
952 VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);
954 /* don't change EPxSTATUS_EP_INVALID to READY */
955 if (!ep->ep.desc && ep->num != 0) {
956 DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
957 return;
960 regs = ep->dev->regs;
961 size = readl(&regs->EPxSizeLA[ep->num]);
962 size &= DATASIZE;
964 /* Non-desirable behavior: FIFO_CLEAR also clears the
965 * endpoint halt feature. For OUT, we _could_ just read
966 * the bytes out (PIO, if !ep->dma); for in, no choice.
968 if (size)
969 command(regs, COMMAND_FIFO_CLEAR, ep->num);
972 static const struct usb_ep_ops goku_ep_ops = {
973 .enable = goku_ep_enable,
974 .disable = goku_ep_disable,
976 .alloc_request = goku_alloc_request,
977 .free_request = goku_free_request,
979 .queue = goku_queue,
980 .dequeue = goku_dequeue,
982 .set_halt = goku_set_halt,
983 .fifo_status = goku_fifo_status,
984 .fifo_flush = goku_fifo_flush,
987 /*-------------------------------------------------------------------------*/
989 static int goku_get_frame(struct usb_gadget *_gadget)
991 return -EOPNOTSUPP;
994 static struct usb_ep *goku_match_ep(struct usb_gadget *g,
995 struct usb_endpoint_descriptor *desc,
996 struct usb_ss_ep_comp_descriptor *ep_comp)
998 struct goku_udc *dev = to_goku_udc(g);
999 struct usb_ep *ep;
1001 switch (usb_endpoint_type(desc)) {
1002 case USB_ENDPOINT_XFER_INT:
1003 /* single buffering is enough */
1004 ep = &dev->ep[3].ep;
1005 if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
1006 return ep;
1007 break;
1008 case USB_ENDPOINT_XFER_BULK:
1009 if (usb_endpoint_dir_in(desc)) {
1010 /* DMA may be available */
1011 ep = &dev->ep[2].ep;
1012 if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
1013 return ep;
1015 break;
1016 default:
1017 /* nothing */ ;
1020 return NULL;
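/*
 * Illustration only (not from the original source): the match_ep hook above
 * is consulted by usb_ep_autoconfig() when a gadget driver claims endpoints;
 * a full-speed bulk IN descriptor ends up on ep2-bulk, where IN dma works.
 */
#if 0
static struct usb_ep *example_pick_ep(struct usb_gadget *g,
		struct usb_endpoint_descriptor *desc)
{
	return usb_ep_autoconfig(g, desc);	/* may call goku_match_ep */
}
#endif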
1023 static int goku_udc_start(struct usb_gadget *g,
1024 struct usb_gadget_driver *driver);
1025 static int goku_udc_stop(struct usb_gadget *g);
1027 static const struct usb_gadget_ops goku_ops = {
1028 .get_frame = goku_get_frame,
1029 .udc_start = goku_udc_start,
1030 .udc_stop = goku_udc_stop,
1031 .match_ep = goku_match_ep,
1032 // no remote wakeup
1033 // not selfpowered
1036 /*-------------------------------------------------------------------------*/
1038 static inline const char *dmastr(void)
1040 if (use_dma == 0)
1041 return "(dma disabled)";
1042 else if (use_dma == 2)
1043 return "(dma IN and OUT)";
1044 else
1045 return "(dma IN)";
1048 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1050 static const char proc_node_name [] = "driver/udc";
1052 #define FOURBITS "%s%s%s%s"
1053 #define EIGHTBITS FOURBITS FOURBITS
1055 static void dump_intmask(struct seq_file *m, const char *label, u32 mask)
1057 /* int_status is the same format ... */
1058 seq_printf(m, "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
1059 label, mask,
1060 (mask & INT_PWRDETECT) ? " power" : "",
1061 (mask & INT_SYSERROR) ? " sys" : "",
1062 (mask & INT_MSTRDEND) ? " in-dma" : "",
1063 (mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
1065 (mask & INT_MSTWREND) ? " out-dma" : "",
1066 (mask & INT_MSTWRSET) ? " wrset" : "",
1067 (mask & INT_ERR) ? " err" : "",
1068 (mask & INT_SOF) ? " sof" : "",
1070 (mask & INT_EP3NAK) ? " ep3nak" : "",
1071 (mask & INT_EP2NAK) ? " ep2nak" : "",
1072 (mask & INT_EP1NAK) ? " ep1nak" : "",
1073 (mask & INT_EP3DATASET) ? " ep3" : "",
1075 (mask & INT_EP2DATASET) ? " ep2" : "",
1076 (mask & INT_EP1DATASET) ? " ep1" : "",
1077 (mask & INT_STATUSNAK) ? " ep0snak" : "",
1078 (mask & INT_STATUS) ? " ep0status" : "",
1080 (mask & INT_SETUP) ? " setup" : "",
1081 (mask & INT_ENDPOINT0) ? " ep0" : "",
1082 (mask & INT_USBRESET) ? " reset" : "",
1083 (mask & INT_SUSPEND) ? " suspend" : "");
1086 static const char *udc_ep_state(enum ep0state state)
1088 switch (state) {
1089 case EP0_DISCONNECT:
1090 return "ep0_disconnect";
1091 case EP0_IDLE:
1092 return "ep0_idle";
1093 case EP0_IN:
1094 return "ep0_in";
1095 case EP0_OUT:
1096 return "ep0_out";
1097 case EP0_STATUS:
1098 return "ep0_status";
1099 case EP0_STALL:
1100 return "ep0_stall";
1101 case EP0_SUSPEND:
1102 return "ep0_suspend";
1105 return "ep0_?";
1108 static const char *udc_ep_status(u32 status)
1110 switch (status & EPxSTATUS_EP_MASK) {
1111 case EPxSTATUS_EP_READY:
1112 return "ready";
1113 case EPxSTATUS_EP_DATAIN:
1114 return "packet";
1115 case EPxSTATUS_EP_FULL:
1116 return "full";
1117 case EPxSTATUS_EP_TX_ERR: /* host will retry */
1118 return "tx_err";
1119 case EPxSTATUS_EP_RX_ERR:
1120 return "rx_err";
1121 case EPxSTATUS_EP_BUSY: /* ep0 only */
1122 return "busy";
1123 case EPxSTATUS_EP_STALL:
1124 return "stall";
1125 case EPxSTATUS_EP_INVALID: /* these "can't happen" */
1126 return "invalid";
1129 return "?";
1132 static int udc_proc_read(struct seq_file *m, void *v)
1134 struct goku_udc *dev = m->private;
1135 struct goku_udc_regs __iomem *regs = dev->regs;
1136 unsigned long flags;
1137 int i, is_usb_connected;
1138 u32 tmp;
1140 local_irq_save(flags);
1142 /* basic device status */
1143 tmp = readl(&regs->power_detect);
1144 is_usb_connected = tmp & PW_DETECT;
1145 seq_printf(m,
1146 "%s - %s\n"
1147 "%s version: %s %s\n"
1148 "Gadget driver: %s\n"
1149 "Host %s, %s\n"
1150 "\n",
1151 pci_name(dev->pdev), driver_desc,
1152 driver_name, DRIVER_VERSION, dmastr(),
1153 dev->driver ? dev->driver->driver.name : "(none)",
1154 is_usb_connected
1155 ? ((tmp & PW_PULLUP) ? "full speed" : "powered")
1156 : "disconnected",
1157 udc_ep_state(dev->ep0state));
1159 dump_intmask(m, "int_status", readl(&regs->int_status));
1160 dump_intmask(m, "int_enable", readl(&regs->int_enable));
1162 if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
1163 goto done;
1165 /* registers for (active) device and ep0 */
1166 seq_printf(m, "\nirqs %lu\ndataset %02x single.bcs %02x.%02x state %x addr %u\n",
1167 dev->irqs, readl(&regs->DataSet),
1168 readl(&regs->EPxSingle), readl(&regs->EPxBCS),
1169 readl(&regs->UsbState),
1170 readl(&regs->address));
1171 if (seq_has_overflowed(m))
1172 goto done;
1174 tmp = readl(&regs->dma_master);
1175 seq_printf(m, "dma %03X =" EIGHTBITS "%s %s\n",
1176 tmp,
1177 (tmp & MST_EOPB_DIS) ? " eopb-" : "",
1178 (tmp & MST_EOPB_ENA) ? " eopb+" : "",
1179 (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
1180 (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
1182 (tmp & MST_RD_EOPB) ? " eopb" : "",
1183 (tmp & MST_RD_RESET) ? " in_reset" : "",
1184 (tmp & MST_WR_RESET) ? " out_reset" : "",
1185 (tmp & MST_RD_ENA) ? " IN" : "",
1187 (tmp & MST_WR_ENA) ? " OUT" : "",
1188 (tmp & MST_CONNECTION) ? "ep1in/ep2out" : "ep1out/ep2in");
1189 if (seq_has_overflowed(m))
1190 goto done;
1192 /* dump endpoint queues */
1193 for (i = 0; i < 4; i++) {
1194 struct goku_ep *ep = &dev->ep [i];
1195 struct goku_request *req;
1197 if (i && !ep->ep.desc)
1198 continue;
1200 tmp = readl(ep->reg_status);
1201 seq_printf(m, "%s %s max %u %s, irqs %lu, status %02x (%s) " FOURBITS "\n",
1202 ep->ep.name,
1203 ep->is_in ? "in" : "out",
1204 ep->ep.maxpacket,
1205 ep->dma ? "dma" : "pio",
1206 ep->irqs,
1207 tmp, udc_ep_status(tmp),
1208 (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
1209 (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
1210 (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
1211 (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : "");
1212 if (seq_has_overflowed(m))
1213 goto done;
1215 if (list_empty(&ep->queue)) {
1216 seq_puts(m, "\t(nothing queued)\n");
1217 if (seq_has_overflowed(m))
1218 goto done;
1219 continue;
1221 list_for_each_entry(req, &ep->queue, queue) {
1222 if (ep->dma && req->queue.prev == &ep->queue) {
1223 if (i == UDC_MSTRD_ENDPOINT)
1224 tmp = readl(&regs->in_dma_current);
1225 else
1226 tmp = readl(&regs->out_dma_current);
1227 tmp -= req->req.dma;
1228 tmp++;
1229 } else
1230 tmp = req->req.actual;
1232 seq_printf(m, "\treq %p len %u/%u buf %p\n",
1233 &req->req, tmp, req->req.length,
1234 req->req.buf);
1235 if (seq_has_overflowed(m))
1236 goto done;
1240 done:
1241 local_irq_restore(flags);
1242 return 0;
1246 * seq_file wrappers for procfile show routines.
1248 static int udc_proc_open(struct inode *inode, struct file *file)
1250 return single_open(file, udc_proc_read, PDE_DATA(file_inode(file)));
1253 static const struct file_operations udc_proc_fops = {
1254 .open = udc_proc_open,
1255 .read = seq_read,
1256 .llseek = seq_lseek,
1257 .release = single_release,
1260 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1262 /*-------------------------------------------------------------------------*/
1264 static void udc_reinit (struct goku_udc *dev)
1266 static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
1268 unsigned i;
1270 INIT_LIST_HEAD (&dev->gadget.ep_list);
1271 dev->gadget.ep0 = &dev->ep [0].ep;
1272 dev->gadget.speed = USB_SPEED_UNKNOWN;
1273 dev->ep0state = EP0_DISCONNECT;
1274 dev->irqs = 0;
1276 for (i = 0; i < 4; i++) {
1277 struct goku_ep *ep = &dev->ep[i];
1279 ep->num = i;
1280 ep->ep.name = names[i];
1281 ep->reg_fifo = &dev->regs->ep_fifo [i];
1282 ep->reg_status = &dev->regs->ep_status [i];
1283 ep->reg_mode = &dev->regs->ep_mode[i];
1285 ep->ep.ops = &goku_ep_ops;
1286 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1287 ep->dev = dev;
1288 INIT_LIST_HEAD (&ep->queue);
1290 ep_reset(NULL, ep);
1292 if (i == 0)
1293 ep->ep.caps.type_control = true;
1294 else
1295 ep->ep.caps.type_bulk = true;
1297 ep->ep.caps.dir_in = true;
1298 ep->ep.caps.dir_out = true;
1301 dev->ep[0].reg_mode = NULL;
1302 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE);
1303 list_del_init (&dev->ep[0].ep.ep_list);
1306 static void udc_reset(struct goku_udc *dev)
1308 struct goku_udc_regs __iomem *regs = dev->regs;
1310 writel(0, &regs->power_detect);
1311 writel(0, &regs->int_enable);
1312 readl(&regs->int_enable);
1313 dev->int_enable = 0;
1315 /* deassert reset, leave USB D+ at hi-Z (no pullup)
1316 * don't let INT_PWRDETECT sequence begin
1318 udelay(250);
1319 writel(PW_RESETB, &regs->power_detect);
1320 readl(&regs->int_enable);
1323 static void ep0_start(struct goku_udc *dev)
1325 struct goku_udc_regs __iomem *regs = dev->regs;
1326 unsigned i;
1328 VDBG(dev, "%s\n", __func__);
1330 udc_reset(dev);
1331 udc_reinit (dev);
1332 //writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);
1334 /* hw handles set_address, set_feature, get_status; maybe more */
1335 writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
1336 | G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
1337 | G_REQMODE_GET_DESC
1338 | G_REQMODE_CLEAR_FEAT
1339 , &regs->reqmode);
1341 for (i = 0; i < 4; i++)
1342 dev->ep[i].irqs = 0;
1344 /* can't modify descriptors after writing UsbReady */
1345 for (i = 0; i < DESC_LEN; i++)
1346 writel(0, &regs->descriptors[i]);
1347 writel(0, &regs->UsbReady);
1349 /* expect ep0 requests when the host drops reset */
1350 writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
1351 dev->int_enable = INT_DEVWIDE | INT_EP0;
1352 writel(dev->int_enable, &dev->regs->int_enable);
1353 readl(&regs->int_enable);
1354 dev->gadget.speed = USB_SPEED_FULL;
1355 dev->ep0state = EP0_IDLE;
1358 static void udc_enable(struct goku_udc *dev)
1360 /* start enumeration now, or after power detect irq */
1361 if (readl(&dev->regs->power_detect) & PW_DETECT)
1362 ep0_start(dev);
1363 else {
1364 DBG(dev, "%s\n", __func__);
1365 dev->int_enable = INT_PWRDETECT;
1366 writel(dev->int_enable, &dev->regs->int_enable);
1370 /*-------------------------------------------------------------------------*/
1372 /* keeping it simple:
1373 * - one bus driver, initted first;
1374 * - one function driver, initted second
1377 /* when a driver is successfully registered, it will receive
1378 * control requests including set_configuration(), which enables
1379 * non-control requests. then usb traffic follows until a
1380 * disconnect is reported. then a host may connect again, or
1381 * the driver might get unbound.
1383 static int goku_udc_start(struct usb_gadget *g,
1384 struct usb_gadget_driver *driver)
1386 struct goku_udc *dev = to_goku_udc(g);
1388 /* hook up the driver */
1389 driver->driver.bus = NULL;
1390 dev->driver = driver;
1393 * then enable host detection and ep0; and we're ready
1394 * for set_configuration as well as eventual disconnect.
1396 udc_enable(dev);
1398 return 0;
1401 static void stop_activity(struct goku_udc *dev)
1403 unsigned i;
1405 DBG (dev, "%s\n", __func__);
/* disconnect gadget driver after quiescing hw and the driver */
1408 udc_reset (dev);
1409 for (i = 0; i < 4; i++)
1410 nuke(&dev->ep [i], -ESHUTDOWN);
1412 if (dev->driver)
1413 udc_enable(dev);
1416 static int goku_udc_stop(struct usb_gadget *g)
1418 struct goku_udc *dev = to_goku_udc(g);
1419 unsigned long flags;
1421 spin_lock_irqsave(&dev->lock, flags);
1422 dev->driver = NULL;
1423 stop_activity(dev);
1424 spin_unlock_irqrestore(&dev->lock, flags);
1426 return 0;
1429 /*-------------------------------------------------------------------------*/
1431 static void ep0_setup(struct goku_udc *dev)
1433 struct goku_udc_regs __iomem *regs = dev->regs;
1434 struct usb_ctrlrequest ctrl;
1435 int tmp;
1437 /* read SETUP packet and enter DATA stage */
1438 ctrl.bRequestType = readl(&regs->bRequestType);
1439 ctrl.bRequest = readl(&regs->bRequest);
1440 ctrl.wValue = cpu_to_le16((readl(&regs->wValueH) << 8)
1441 | readl(&regs->wValueL));
1442 ctrl.wIndex = cpu_to_le16((readl(&regs->wIndexH) << 8)
1443 | readl(&regs->wIndexL));
1444 ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
1445 | readl(&regs->wLengthL));
1446 writel(0, &regs->SetupRecv);
1448 nuke(&dev->ep[0], 0);
1449 dev->ep[0].stopped = 0;
1450 if (likely(ctrl.bRequestType & USB_DIR_IN)) {
1451 dev->ep[0].is_in = 1;
1452 dev->ep0state = EP0_IN;
1453 /* detect early status stages */
1454 writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
1455 } else {
1456 dev->ep[0].is_in = 0;
1457 dev->ep0state = EP0_OUT;
1459 /* NOTE: CLEAR_FEATURE is done in software so that we can
1460 * synchronize transfer restarts after bulk IN stalls. data
1461 * won't even enter the fifo until the halt is cleared.
1463 switch (ctrl.bRequest) {
1464 case USB_REQ_CLEAR_FEATURE:
1465 switch (ctrl.bRequestType) {
1466 case USB_RECIP_ENDPOINT:
1467 tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
1468 /* active endpoint */
1469 if (tmp > 3 ||
1470 (!dev->ep[tmp].ep.desc && tmp != 0))
1471 goto stall;
1472 if (ctrl.wIndex & cpu_to_le16(
1473 USB_DIR_IN)) {
1474 if (!dev->ep[tmp].is_in)
1475 goto stall;
1476 } else {
1477 if (dev->ep[tmp].is_in)
1478 goto stall;
1480 if (ctrl.wValue != cpu_to_le16(
1481 USB_ENDPOINT_HALT))
1482 goto stall;
1483 if (tmp)
1484 goku_clear_halt(&dev->ep[tmp]);
1485 succeed:
1486 /* start ep0out status stage */
1487 writel(~(1<<0), &regs->EOP);
1488 dev->ep[0].stopped = 1;
1489 dev->ep0state = EP0_STATUS;
1490 return;
1491 case USB_RECIP_DEVICE:
1492 /* device remote wakeup: always clear */
1493 if (ctrl.wValue != cpu_to_le16(1))
1494 goto stall;
1495 VDBG(dev, "clear dev remote wakeup\n");
1496 goto succeed;
1497 case USB_RECIP_INTERFACE:
1498 goto stall;
1499 default: /* pass to gadget driver */
1500 break;
1502 break;
1503 default:
1504 break;
1508 #ifdef USB_TRACE
1509 VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1510 ctrl.bRequestType, ctrl.bRequest,
1511 le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
1512 le16_to_cpu(ctrl.wLength));
1513 #endif
1515 /* hw wants to know when we're configured (or not) */
1516 dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
1517 && ctrl.bRequestType == USB_RECIP_DEVICE);
1518 if (unlikely(dev->req_config))
1519 dev->configured = (ctrl.wValue != cpu_to_le16(0));
1521 /* delegate everything to the gadget driver.
1522 * it may respond after this irq handler returns.
1524 spin_unlock (&dev->lock);
1525 tmp = dev->driver->setup(&dev->gadget, &ctrl);
1526 spin_lock (&dev->lock);
1527 if (unlikely(tmp < 0)) {
1528 stall:
1529 #ifdef USB_TRACE
1530 VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
1531 ctrl.bRequestType, ctrl.bRequest, tmp);
1532 #endif
1533 command(regs, COMMAND_STALL, 0);
1534 dev->ep[0].stopped = 1;
1535 dev->ep0state = EP0_STALL;
1538 /* expect at least one data or status stage irq */
1541 #define ACK(irqbit) { \
1542 stat &= ~irqbit; \
1543 writel(~irqbit, &regs->int_status); \
1544 handled = 1; \
1547 static irqreturn_t goku_irq(int irq, void *_dev)
1549 struct goku_udc *dev = _dev;
1550 struct goku_udc_regs __iomem *regs = dev->regs;
1551 struct goku_ep *ep;
1552 u32 stat, handled = 0;
1553 unsigned i, rescans = 5;
1555 spin_lock(&dev->lock);
1557 rescan:
1558 stat = readl(&regs->int_status) & dev->int_enable;
1559 if (!stat)
1560 goto done;
1561 dev->irqs++;
1563 /* device-wide irqs */
1564 if (unlikely(stat & INT_DEVWIDE)) {
1565 if (stat & INT_SYSERROR) {
1566 ERROR(dev, "system error\n");
1567 stop_activity(dev);
1568 stat = 0;
1569 handled = 1;
1570 // FIXME have a neater way to prevent re-enumeration
1571 dev->driver = NULL;
1572 goto done;
1574 if (stat & INT_PWRDETECT) {
1575 writel(~stat, &regs->int_status);
1576 if (readl(&dev->regs->power_detect) & PW_DETECT) {
1577 VDBG(dev, "connect\n");
1578 ep0_start(dev);
1579 } else {
1580 DBG(dev, "disconnect\n");
1581 if (dev->gadget.speed == USB_SPEED_FULL)
1582 stop_activity(dev);
1583 dev->ep0state = EP0_DISCONNECT;
1584 dev->int_enable = INT_DEVWIDE;
1585 writel(dev->int_enable, &dev->regs->int_enable);
1587 stat = 0;
1588 handled = 1;
1589 goto done;
1591 if (stat & INT_SUSPEND) {
1592 ACK(INT_SUSPEND);
1593 if (readl(&regs->ep_status[0]) & EPxSTATUS_SUSPEND) {
1594 switch (dev->ep0state) {
1595 case EP0_DISCONNECT:
1596 case EP0_SUSPEND:
1597 goto pm_next;
1598 default:
1599 break;
1601 DBG(dev, "USB suspend\n");
1602 dev->ep0state = EP0_SUSPEND;
1603 if (dev->gadget.speed != USB_SPEED_UNKNOWN
1604 && dev->driver
1605 && dev->driver->suspend) {
1606 spin_unlock(&dev->lock);
1607 dev->driver->suspend(&dev->gadget);
1608 spin_lock(&dev->lock);
1610 } else {
1611 if (dev->ep0state != EP0_SUSPEND) {
1612 DBG(dev, "bogus USB resume %d\n",
1613 dev->ep0state);
1614 goto pm_next;
1616 DBG(dev, "USB resume\n");
1617 dev->ep0state = EP0_IDLE;
1618 if (dev->gadget.speed != USB_SPEED_UNKNOWN
1619 && dev->driver
1620 && dev->driver->resume) {
1621 spin_unlock(&dev->lock);
1622 dev->driver->resume(&dev->gadget);
1623 spin_lock(&dev->lock);
1627 pm_next:
1628 if (stat & INT_USBRESET) { /* hub reset done */
1629 ACK(INT_USBRESET);
1630 INFO(dev, "USB reset done, gadget %s\n",
1631 dev->driver->driver.name);
1633 // and INT_ERR on some endpoint's crc/bitstuff/... problem
1636 /* progress ep0 setup, data, or status stages.
1637 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
1639 if (stat & INT_SETUP) {
1640 ACK(INT_SETUP);
1641 dev->ep[0].irqs++;
1642 ep0_setup(dev);
1644 if (stat & INT_STATUSNAK) {
1645 ACK(INT_STATUSNAK|INT_ENDPOINT0);
1646 if (dev->ep0state == EP0_IN) {
1647 ep = &dev->ep[0];
1648 ep->irqs++;
1649 nuke(ep, 0);
1650 writel(~(1<<0), &regs->EOP);
1651 dev->ep0state = EP0_STATUS;
1654 if (stat & INT_ENDPOINT0) {
1655 ACK(INT_ENDPOINT0);
1656 ep = &dev->ep[0];
1657 ep->irqs++;
1658 pio_advance(ep);
1661 /* dma completion */
1662 if (stat & INT_MSTRDEND) { /* IN */
1663 ACK(INT_MSTRDEND);
1664 ep = &dev->ep[UDC_MSTRD_ENDPOINT];
1665 ep->irqs++;
1666 dma_advance(dev, ep);
1668 if (stat & INT_MSTWREND) { /* OUT */
1669 ACK(INT_MSTWREND);
1670 ep = &dev->ep[UDC_MSTWR_ENDPOINT];
1671 ep->irqs++;
1672 dma_advance(dev, ep);
1674 if (stat & INT_MSTWRTMOUT) { /* OUT */
1675 ACK(INT_MSTWRTMOUT);
1676 ep = &dev->ep[UDC_MSTWR_ENDPOINT];
1677 ep->irqs++;
1678 ERROR(dev, "%s write timeout ?\n", ep->ep.name);
1679 // reset dma? then dma_advance()
1682 /* pio */
1683 for (i = 1; i < 4; i++) {
1684 u32 tmp = INT_EPxDATASET(i);
1686 if (!(stat & tmp))
1687 continue;
1688 ep = &dev->ep[i];
1689 pio_advance(ep);
1690 if (list_empty (&ep->queue))
1691 pio_irq_disable(dev, regs, i);
1692 stat &= ~tmp;
1693 handled = 1;
1694 ep->irqs++;
1697 if (rescans--)
1698 goto rescan;
1700 done:
1701 (void)readl(&regs->int_enable);
1702 spin_unlock(&dev->lock);
1703 if (stat)
1704 DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
1705 readl(&regs->int_status), dev->int_enable);
1706 return IRQ_RETVAL(handled);
1709 #undef ACK
1711 /*-------------------------------------------------------------------------*/
1713 static void gadget_release(struct device *_dev)
1715 struct goku_udc *dev = dev_get_drvdata(_dev);
1717 kfree(dev);
1720 /* tear down the binding between this driver and the pci device */
1722 static void goku_remove(struct pci_dev *pdev)
1724 struct goku_udc *dev = pci_get_drvdata(pdev);
1726 DBG(dev, "%s\n", __func__);
1728 usb_del_gadget_udc(&dev->gadget);
1730 BUG_ON(dev->driver);
1732 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1733 remove_proc_entry(proc_node_name, NULL);
1734 #endif
1735 if (dev->regs)
1736 udc_reset(dev);
1737 if (dev->got_irq)
1738 free_irq(pdev->irq, dev);
1739 if (dev->regs)
1740 iounmap(dev->regs);
1741 if (dev->got_region)
1742 release_mem_region(pci_resource_start (pdev, 0),
1743 pci_resource_len (pdev, 0));
1744 if (dev->enabled)
1745 pci_disable_device(pdev);
1747 dev->regs = NULL;
1749 INFO(dev, "unbind\n");
1752 /* wrap this driver around the specified pci device, but
1753 * don't respond over USB until a gadget driver binds to us.
1756 static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1758 struct goku_udc *dev = NULL;
1759 unsigned long resource, len;
1760 void __iomem *base = NULL;
1761 int retval;
1763 if (!pdev->irq) {
1764 printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
1765 retval = -ENODEV;
1766 goto err;
1769 /* alloc, and start init */
1770 dev = kzalloc (sizeof *dev, GFP_KERNEL);
1771 if (!dev) {
1772 retval = -ENOMEM;
1773 goto err;
1776 spin_lock_init(&dev->lock);
1777 dev->pdev = pdev;
1778 dev->gadget.ops = &goku_ops;
1779 dev->gadget.max_speed = USB_SPEED_FULL;
1781 /* the "gadget" abstracts/virtualizes the controller */
1782 dev->gadget.name = driver_name;
1784 /* now all the pci goodies ... */
1785 retval = pci_enable_device(pdev);
1786 if (retval < 0) {
1787 DBG(dev, "can't enable, %d\n", retval);
1788 goto err;
1790 dev->enabled = 1;
1792 resource = pci_resource_start(pdev, 0);
1793 len = pci_resource_len(pdev, 0);
1794 if (!request_mem_region(resource, len, driver_name)) {
1795 DBG(dev, "controller already in use\n");
1796 retval = -EBUSY;
1797 goto err;
1799 dev->got_region = 1;
1801 base = ioremap_nocache(resource, len);
1802 if (base == NULL) {
1803 DBG(dev, "can't map memory\n");
1804 retval = -EFAULT;
1805 goto err;
1807 dev->regs = (struct goku_udc_regs __iomem *) base;
1809 pci_set_drvdata(pdev, dev);
1810 INFO(dev, "%s\n", driver_desc);
1811 INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
1812 INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
1814 /* init to known state, then setup irqs */
1815 udc_reset(dev);
1816 udc_reinit (dev);
1817 if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
1818 driver_name, dev) != 0) {
1819 DBG(dev, "request interrupt %d failed\n", pdev->irq);
1820 retval = -EBUSY;
1821 goto err;
1823 dev->got_irq = 1;
1824 if (use_dma)
1825 pci_set_master(pdev);
1828 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
1829 proc_create_data(proc_node_name, 0, NULL, &udc_proc_fops, dev);
1830 #endif
1832 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
1833 gadget_release);
1834 if (retval)
1835 goto err;
1837 return 0;
1839 err:
1840 if (dev)
1841 goku_remove (pdev);
1842 /* gadget_release is not registered yet, kfree explicitly */
1843 kfree(dev);
1844 return retval;
1848 /*-------------------------------------------------------------------------*/
1850 static const struct pci_device_id pci_ids[] = { {
1851 .class = PCI_CLASS_SERIAL_USB_DEVICE,
1852 .class_mask = ~0,
1853 .vendor = 0x102f, /* Toshiba */
1854 .device = 0x0107, /* this UDC */
1855 .subvendor = PCI_ANY_ID,
1856 .subdevice = PCI_ANY_ID,
1858 }, { /* end: all zeroes */ }
1860 MODULE_DEVICE_TABLE (pci, pci_ids);
1862 static struct pci_driver goku_pci_driver = {
1863 .name = (char *) driver_name,
1864 .id_table = pci_ids,
1866 .probe = goku_probe,
1867 .remove = goku_remove,
1869 /* FIXME add power management support */
1872 module_pci_driver(goku_pci_driver);