/*
 * MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
 * [linux-2.6.9-moxart.git] / drivers / usb / gadget / net2280.c
 * blob d28de0b8cebac7e0b49a65de6d16c6fb0df99bce
 */
1 /*
2 * Driver for the NetChip 2280 USB device controller.
3 * Specs and errata are available from <http://www.netchip.com>.
5 * NetChip Technology Inc. supported the development of this driver.
8 * CODE STATUS HIGHLIGHTS
10 * This driver should work well with most "gadget" drivers, including
11 * the File Storage, Serial, and Ethernet/RNDIS gadget drivers
12 * as well as Gadget Zero and Gadgetfs.
14 * DMA is enabled by default. Drivers using transfer queues might use
15 * DMA chaining to remove IRQ latencies between transfers. (Except when
16 * short OUT transfers happen.) Drivers can use the req->no_interrupt
17 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
18 * and DMA chaining is enabled.
20 * Note that almost all the errata workarounds here are only needed for
21 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
25 * Copyright (C) 2003 David Brownell
26 * Copyright (C) 2003 NetChip Technologies
28 * This program is free software; you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License as published by
30 * the Free Software Foundation; either version 2 of the License, or
31 * (at your option) any later version.
33 * This program is distributed in the hope that it will be useful,
34 * but WITHOUT ANY WARRANTY; without even the implied warranty of
35 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 * GNU General Public License for more details.
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
43 #undef DEBUG /* messages on error and most fault paths */
44 #undef VERBOSE /* extra debug messages (success too) */
46 #include <linux/config.h>
47 #include <linux/module.h>
48 #include <linux/pci.h>
49 #include <linux/kernel.h>
50 #include <linux/delay.h>
51 #include <linux/ioport.h>
52 #include <linux/sched.h>
53 #include <linux/slab.h>
54 #include <linux/smp_lock.h>
55 #include <linux/errno.h>
56 #include <linux/init.h>
57 #include <linux/timer.h>
58 #include <linux/list.h>
59 #include <linux/interrupt.h>
60 #include <linux/moduleparam.h>
61 #include <linux/device.h>
62 #include <linux/usb_ch9.h>
63 #include <linux/usb_gadget.h>
65 #include <asm/byteorder.h>
66 #include <asm/io.h>
67 #include <asm/irq.h>
68 #include <asm/system.h>
69 #include <asm/unaligned.h>
72 #define DRIVER_DESC "NetChip 2280 USB Peripheral Controller"
73 #define DRIVER_VERSION "2004 Jan 14"
75 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
76 #define EP_DONTUSE 13 /* nonzero */
78 #define USE_RDK_LEDS /* GPIO pins control three LEDs */
79 #define USE_SYSFS_DEBUG_FILES
82 static const char driver_name [] = "net2280";
83 static const char driver_desc [] = DRIVER_DESC;
85 static const char ep0name [] = "ep0";
86 static const char *ep_name [] = {
87 ep0name,
88 "ep-a", "ep-b", "ep-c", "ep-d",
89 "ep-e", "ep-f",
92 /* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
93 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
95 * The net2280 DMA engines are not tightly integrated with their FIFOs;
96 * not all cases are (yet) handled well in this driver or the silicon.
97 * Some gadget drivers work better with the dma support here than others.
98 * These two parameters let you use PIO or more aggressive DMA.
100 static int use_dma = 1;
101 static int use_dma_chaining = 0;
103 /* "modprobe net2280 use_dma=n" etc */
104 module_param (use_dma, bool, S_IRUGO);
105 module_param (use_dma_chaining, bool, S_IRUGO);
108 /* mode 0 == ep-{a,b,c,d} 1K fifo each
109 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
110 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
112 static ushort fifo_mode = 0;
114 /* "modprobe net2280 fifo_mode=1" etc */
115 module_param (fifo_mode, ushort, 0644);
/* "in"/"out" label for an endpoint address, for debug messages */
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")

#if defined(USE_SYSFS_DEBUG_FILES) || defined (DEBUG)
/* human-readable transfer type, for debug/sysfs output only */
static char *type_string (u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:	return "bulk";
	case USB_ENDPOINT_XFER_ISOC:	return "iso";
	case USB_ENDPOINT_XFER_INT:	return "intr";
	}
	return "control";
}
#endif
132 #include "net2280.h"
134 #define valid_bit __constant_cpu_to_le32 (1 << VALID_BIT)
135 #define dma_done_ie __constant_cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)
137 /*-------------------------------------------------------------------------*/
139 static int
140 net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
142 struct net2280 *dev;
143 struct net2280_ep *ep;
144 u32 max, tmp;
145 unsigned long flags;
147 ep = container_of (_ep, struct net2280_ep, ep);
148 if (!_ep || !desc || ep->desc || _ep->name == ep0name
149 || desc->bDescriptorType != USB_DT_ENDPOINT)
150 return -EINVAL;
151 dev = ep->dev;
152 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
153 return -ESHUTDOWN;
155 /* erratum 0119 workaround ties up an endpoint number */
156 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
157 return -EDOM;
159 /* sanity check ep-e/ep-f since their fifos are small */
160 max = le16_to_cpu (desc->wMaxPacketSize) & 0x1fff;
161 if (ep->num > 4 && max > 64)
162 return -ERANGE;
164 spin_lock_irqsave (&dev->lock, flags);
165 _ep->maxpacket = max & 0x7ff;
166 ep->desc = desc;
168 /* ep_reset() has already been called */
169 ep->stopped = 0;
170 ep->out_overflow = 0;
172 /* set speed-dependent max packet; may kick in high bandwidth */
173 set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);
175 /* FIFO lines can't go to different packets. PIO is ok, so
176 * use it instead of troublesome (non-bulk) multi-packet DMA.
178 if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
179 DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
180 ep->ep.name, ep->ep.maxpacket);
181 ep->dma = NULL;
184 /* set type, direction, address; reset fifo counters */
185 writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
186 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
187 if (tmp == USB_ENDPOINT_XFER_INT) {
188 /* erratum 0105 workaround prevents hs NYET */
189 if (dev->chiprev == 0100
190 && dev->gadget.speed == USB_SPEED_HIGH
191 && !(desc->bEndpointAddress & USB_DIR_IN))
192 writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
193 &ep->regs->ep_rsp);
194 } else if (tmp == USB_ENDPOINT_XFER_BULK) {
195 /* catch some particularly blatant driver bugs */
196 if ((dev->gadget.speed == USB_SPEED_HIGH
197 && max != 512)
198 || (dev->gadget.speed == USB_SPEED_FULL
199 && max > 64)) {
200 spin_unlock_irqrestore (&dev->lock, flags);
201 return -ERANGE;
204 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
205 tmp <<= ENDPOINT_TYPE;
206 tmp |= desc->bEndpointAddress;
207 tmp |= (4 << ENDPOINT_BYTE_COUNT); /* default full fifo lines */
208 tmp |= 1 << ENDPOINT_ENABLE;
209 wmb ();
211 /* for OUT transfers, block the rx fifo until a read is posted */
212 ep->is_in = (tmp & USB_DIR_IN) != 0;
213 if (!ep->is_in)
214 writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
216 writel (tmp, &ep->regs->ep_cfg);
218 /* enable irqs */
219 if (!ep->dma) { /* pio, per-packet */
220 tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
221 writel (tmp, &dev->regs->pciirqenb0);
223 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
224 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
225 | readl (&ep->regs->ep_irqenb);
226 writel (tmp, &ep->regs->ep_irqenb);
227 } else { /* dma, per-request */
228 tmp = (1 << (8 + ep->num)); /* completion */
229 tmp |= readl (&dev->regs->pciirqenb1);
230 writel (tmp, &dev->regs->pciirqenb1);
232 /* for short OUT transfers, dma completions can't
233 * advance the queue; do it pio-style, by hand.
234 * NOTE erratum 0112 workaround #2
236 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
237 tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
238 writel (tmp, &ep->regs->ep_irqenb);
240 tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
241 writel (tmp, &dev->regs->pciirqenb0);
245 tmp = desc->bEndpointAddress;
246 DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
247 _ep->name, tmp & 0x0f, DIR_STRING (tmp),
248 type_string (desc->bmAttributes),
249 ep->dma ? "dma" : "pio", max);
251 /* pci writes may still be posted */
252 spin_unlock_irqrestore (&dev->lock, flags);
253 return 0;
256 static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
258 u32 result;
260 do {
261 result = readl (ptr);
262 if (result == ~(u32)0) /* "device unplugged" */
263 return -ENODEV;
264 result &= mask;
265 if (result == done)
266 return 0;
267 udelay (1);
268 usec--;
269 } while (usec > 0);
270 return -ETIMEDOUT;
273 static struct usb_ep_ops net2280_ep_ops;
275 static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
277 u32 tmp;
279 ep->desc = NULL;
280 INIT_LIST_HEAD (&ep->queue);
282 ep->ep.maxpacket = ~0;
283 ep->ep.ops = &net2280_ep_ops;
285 /* disable the dma, irqs, endpoint... */
286 if (ep->dma) {
287 writel (0, &ep->dma->dmactl);
288 writel ( (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
289 | (1 << DMA_TRANSACTION_DONE_INTERRUPT)
290 | (1 << DMA_ABORT)
291 , &ep->dma->dmastat);
293 tmp = readl (&regs->pciirqenb0);
294 tmp &= ~(1 << ep->num);
295 writel (tmp, &regs->pciirqenb0);
296 } else {
297 tmp = readl (&regs->pciirqenb1);
298 tmp &= ~(1 << (8 + ep->num)); /* completion */
299 writel (tmp, &regs->pciirqenb1);
301 writel (0, &ep->regs->ep_irqenb);
303 /* init to our chosen defaults, notably so that we NAK OUT
304 * packets until the driver queues a read (+note erratum 0112)
306 tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
307 | (1 << SET_NAK_OUT_PACKETS)
308 | (1 << CLEAR_EP_HIDE_STATUS_PHASE)
309 | (1 << CLEAR_INTERRUPT_MODE);
311 if (ep->num != 0) {
312 tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
313 | (1 << CLEAR_ENDPOINT_HALT);
315 writel (tmp, &ep->regs->ep_rsp);
317 /* scrub most status bits, and flush any fifo state */
318 writel ( (1 << TIMEOUT)
319 | (1 << USB_STALL_SENT)
320 | (1 << USB_IN_NAK_SENT)
321 | (1 << USB_IN_ACK_RCVD)
322 | (1 << USB_OUT_PING_NAK_SENT)
323 | (1 << USB_OUT_ACK_SENT)
324 | (1 << FIFO_OVERFLOW)
325 | (1 << FIFO_UNDERFLOW)
326 | (1 << FIFO_FLUSH)
327 | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
328 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
329 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
330 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
331 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
332 | (1 << DATA_IN_TOKEN_INTERRUPT)
333 , &ep->regs->ep_stat);
335 /* fifo size is handled separately */
338 static void nuke (struct net2280_ep *);
340 static int net2280_disable (struct usb_ep *_ep)
342 struct net2280_ep *ep;
343 unsigned long flags;
345 ep = container_of (_ep, struct net2280_ep, ep);
346 if (!_ep || !ep->desc || _ep->name == ep0name)
347 return -EINVAL;
349 spin_lock_irqsave (&ep->dev->lock, flags);
350 nuke (ep);
351 ep_reset (ep->dev->regs, ep);
353 VDEBUG (ep->dev, "disabled %s %s\n",
354 ep->dma ? "dma" : "pio", _ep->name);
356 /* synch memory views with the device */
357 (void) readl (&ep->regs->ep_cfg);
359 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
360 ep->dma = &ep->dev->dma [ep->num - 1];
362 spin_unlock_irqrestore (&ep->dev->lock, flags);
363 return 0;
366 /*-------------------------------------------------------------------------*/
368 static struct usb_request *
369 net2280_alloc_request (struct usb_ep *_ep, int gfp_flags)
371 struct net2280_ep *ep;
372 struct net2280_request *req;
374 if (!_ep)
375 return NULL;
376 ep = container_of (_ep, struct net2280_ep, ep);
378 req = kmalloc (sizeof *req, gfp_flags);
379 if (!req)
380 return NULL;
382 memset (req, 0, sizeof *req);
383 req->req.dma = DMA_ADDR_INVALID;
384 INIT_LIST_HEAD (&req->queue);
386 /* this dma descriptor may be swapped with the previous dummy */
387 if (ep->dma) {
388 struct net2280_dma *td;
390 td = pci_pool_alloc (ep->dev->requests, gfp_flags,
391 &req->td_dma);
392 if (!td) {
393 kfree (req);
394 return NULL;
396 td->dmacount = 0; /* not VALID */
397 td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
398 td->dmadesc = td->dmaaddr;
399 req->td = td;
401 return &req->req;
404 static void
405 net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
407 struct net2280_ep *ep;
408 struct net2280_request *req;
410 ep = container_of (_ep, struct net2280_ep, ep);
411 if (!_ep || !_req)
412 return;
414 req = container_of (_req, struct net2280_request, req);
415 WARN_ON (!list_empty (&req->queue));
416 if (req->td)
417 pci_pool_free (ep->dev->requests, req->td, req->td_dma);
418 kfree (req);
421 /*-------------------------------------------------------------------------*/
423 #undef USE_KMALLOC
425 /* many common platforms have dma-coherent caches, which means that it's
426 * safe to use kmalloc() memory for all i/o buffers without using any
427 * cache flushing calls. (unless you're trying to share cache lines
428 * between dma and non-dma activities, which is a slow idea in any case.)
430 * other platforms need more care, with 2.5 having a moderately general
431 * solution (which falls down for allocations smaller than one page)
432 * that improves significantly on the 2.4 PCI allocators by removing
433 * the restriction that memory never be freed in_interrupt().
435 #if defined(CONFIG_X86)
436 #define USE_KMALLOC
438 #elif defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
439 #define USE_KMALLOC
441 #elif defined(CONFIG_MIPS) && !defined(CONFIG_NONCOHERENT_IO)
442 #define USE_KMALLOC
444 /* FIXME there are other cases, including an x86-64 one ... */
445 #endif
447 /* allocating buffers this way eliminates dma mapping overhead, which
448 * on some platforms will mean eliminating a per-io buffer copy. with
449 * some kinds of system caches, further tweaks may still be needed.
451 static void *
452 net2280_alloc_buffer (
453 struct usb_ep *_ep,
454 unsigned bytes,
455 dma_addr_t *dma,
456 int gfp_flags
459 void *retval;
460 struct net2280_ep *ep;
462 ep = container_of (_ep, struct net2280_ep, ep);
463 if (!_ep)
464 return NULL;
465 *dma = DMA_ADDR_INVALID;
467 #if defined(USE_KMALLOC)
468 retval = kmalloc(bytes, gfp_flags);
469 if (retval)
470 *dma = virt_to_phys(retval);
471 #else
472 if (ep->dma) {
473 /* the main problem with this call is that it wastes memory
474 * on typical 1/N page allocations: it allocates 1-N pages.
476 #warning Using dma_alloc_coherent even with buffers smaller than a page.
477 retval = dma_alloc_coherent(&ep->dev->pdev->dev,
478 bytes, dma, gfp_flags);
479 } else
480 retval = kmalloc(bytes, gfp_flags);
481 #endif
482 return retval;
485 static void
486 net2280_free_buffer (
487 struct usb_ep *_ep,
488 void *buf,
489 dma_addr_t dma,
490 unsigned bytes
492 /* free memory into the right allocator */
493 #ifndef USE_KMALLOC
494 if (dma != DMA_ADDR_INVALID) {
495 struct net2280_ep *ep;
497 ep = container_of(_ep, struct net2280_ep, ep);
498 if (!_ep)
499 return;
500 dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
501 } else
502 #endif
503 kfree (buf);
506 /*-------------------------------------------------------------------------*/
508 /* load a packet into the fifo we use for usb IN transfers.
509 * works for all endpoints.
511 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
512 * at a time, but this code is simpler because it knows it only writes
513 * one packet. ep-a..ep-d should use dma instead.
515 static void
516 write_fifo (struct net2280_ep *ep, struct usb_request *req)
518 struct net2280_ep_regs __iomem *regs = ep->regs;
519 u8 *buf;
520 u32 tmp;
521 unsigned count, total;
523 /* INVARIANT: fifo is currently empty. (testable) */
525 if (req) {
526 buf = req->buf + req->actual;
527 prefetch (buf);
528 total = req->length - req->actual;
529 } else {
530 total = 0;
531 buf = NULL;
534 /* write just one packet at a time */
535 count = ep->ep.maxpacket;
536 if (count > total) /* min() cannot be used on a bitfield */
537 count = total;
539 VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
540 ep->ep.name, count,
541 (count != ep->ep.maxpacket) ? " (short)" : "",
542 req);
543 while (count >= 4) {
544 /* NOTE be careful if you try to align these. fifo lines
545 * should normally be full (4 bytes) and successive partial
546 * lines are ok only in certain cases.
548 tmp = get_unaligned ((u32 *)buf);
549 cpu_to_le32s (&tmp);
550 writel (tmp, &regs->ep_data);
551 buf += 4;
552 count -= 4;
555 /* last fifo entry is "short" unless we wrote a full packet.
556 * also explicitly validate last word in (periodic) transfers
557 * when maxpacket is not a multiple of 4 bytes.
559 if (count || total < ep->ep.maxpacket) {
560 tmp = count ? get_unaligned ((u32 *)buf) : count;
561 cpu_to_le32s (&tmp);
562 set_fifo_bytecount (ep, count & 0x03);
563 writel (tmp, &regs->ep_data);
566 /* pci writes may still be posted */
569 /* work around erratum 0106: PCI and USB race over the OUT fifo.
570 * caller guarantees chiprev 0100, out endpoint is NAKing, and
571 * there's no real data in the fifo.
573 * NOTE: also used in cases where that erratum doesn't apply:
574 * where the host wrote "too much" data to us.
576 static void out_flush (struct net2280_ep *ep)
578 u32 __iomem *statp;
579 u32 tmp;
581 ASSERT_OUT_NAKING (ep);
583 statp = &ep->regs->ep_stat;
584 writel ( (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
585 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
586 , statp);
587 writel ((1 << FIFO_FLUSH), statp);
588 mb ();
589 tmp = readl (statp);
590 if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
591 /* high speed did bulk NYET; fifo isn't filling */
592 && ep->dev->gadget.speed == USB_SPEED_FULL) {
593 unsigned usec;
595 usec = 50; /* 64 byte bulk/interrupt */
596 handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
597 (1 << USB_OUT_PING_NAK_SENT), usec);
598 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
602 /* unload packet(s) from the fifo we use for usb OUT transfers.
603 * returns true iff the request completed, because of short packet
604 * or the request buffer having filled with full packets.
606 * for ep-a..ep-d this will read multiple packets out when they
607 * have been accepted.
609 static int
610 read_fifo (struct net2280_ep *ep, struct net2280_request *req)
612 struct net2280_ep_regs __iomem *regs = ep->regs;
613 u8 *buf = req->req.buf + req->req.actual;
614 unsigned count, tmp, is_short;
615 unsigned cleanup = 0, prevent = 0;
617 /* erratum 0106 ... packets coming in during fifo reads might
618 * be incompletely rejected. not all cases have workarounds.
620 if (ep->dev->chiprev == 0x0100
621 && ep->dev->gadget.speed == USB_SPEED_FULL) {
622 udelay (1);
623 tmp = readl (&ep->regs->ep_stat);
624 if ((tmp & (1 << NAK_OUT_PACKETS)))
625 cleanup = 1;
626 else if ((tmp & (1 << FIFO_FULL))) {
627 start_out_naking (ep);
628 prevent = 1;
630 /* else: hope we don't see the problem */
633 /* never overflow the rx buffer. the fifo reads packets until
634 * it sees a short one; we might not be ready for them all.
636 prefetchw (buf);
637 count = readl (&regs->ep_avail);
638 if (unlikely (count == 0)) {
639 udelay (1);
640 tmp = readl (&ep->regs->ep_stat);
641 count = readl (&regs->ep_avail);
642 /* handled that data already? */
643 if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
644 return 0;
647 tmp = req->req.length - req->req.actual;
648 if (count > tmp) {
649 /* as with DMA, data overflow gets flushed */
650 if ((tmp % ep->ep.maxpacket) != 0) {
651 ERROR (ep->dev,
652 "%s out fifo %d bytes, expected %d\n",
653 ep->ep.name, count, tmp);
654 req->req.status = -EOVERFLOW;
655 cleanup = 1;
656 /* NAK_OUT_PACKETS will be set, so flushing is safe;
657 * the next read will start with the next packet
659 } /* else it's a ZLP, no worries */
660 count = tmp;
662 req->req.actual += count;
664 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
666 VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
667 ep->ep.name, count, is_short ? " (short)" : "",
668 cleanup ? " flush" : "", prevent ? " nak" : "",
669 req, req->req.actual, req->req.length);
671 while (count >= 4) {
672 tmp = readl (&regs->ep_data);
673 cpu_to_le32s (&tmp);
674 put_unaligned (tmp, (u32 *)buf);
675 buf += 4;
676 count -= 4;
678 if (count) {
679 tmp = readl (&regs->ep_data);
680 /* LE conversion is implicit here: */
681 do {
682 *buf++ = (u8) tmp;
683 tmp >>= 8;
684 } while (--count);
686 if (cleanup)
687 out_flush (ep);
688 if (prevent) {
689 writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
690 (void) readl (&ep->regs->ep_rsp);
693 return is_short || ((req->req.actual == req->req.length)
694 && !req->req.zero);
697 /* fill out dma descriptor to match a given request */
698 static void
699 fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
701 struct net2280_dma *td = req->td;
702 u32 dmacount = req->req.length;
704 /* don't let DMA continue after a short OUT packet,
705 * so overruns can't affect the next transfer.
706 * in case of overruns on max-size packets, we can't
707 * stop the fifo from filling but we can flush it.
709 if (ep->is_in)
710 dmacount |= (1 << DMA_DIRECTION);
711 else if ((dmacount % ep->ep.maxpacket) != 0)
712 dmacount |= (1 << END_OF_CHAIN);
714 req->valid = valid;
715 if (valid)
716 dmacount |= (1 << VALID_BIT);
717 if (likely(!req->req.no_interrupt || !use_dma_chaining))
718 dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);
720 /* td->dmadesc = previously set by caller */
721 td->dmaaddr = cpu_to_le32p (&req->req.dma);
723 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
724 wmb ();
725 td->dmacount = cpu_to_le32p (&dmacount);
728 static const u32 dmactl_default =
729 (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
730 | (1 << DMA_CLEAR_COUNT_ENABLE)
731 /* erratum 0116 workaround part 1 (use POLLING) */
732 | (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
733 | (1 << DMA_VALID_BIT_POLLING_ENABLE)
734 | (1 << DMA_VALID_BIT_ENABLE)
735 | (1 << DMA_SCATTER_GATHER_ENABLE)
736 /* erratum 0116 workaround part 2 (no AUTOSTART) */
737 | (1 << DMA_ENABLE);
739 static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
741 handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
744 static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
746 writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
747 spin_stop_dma (dma);
750 static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
752 struct net2280_dma_regs __iomem *dma = ep->dma;
754 writel ((1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION),
755 &dma->dmacount);
756 writel (readl (&dma->dmastat), &dma->dmastat);
758 writel (td_dma, &dma->dmadesc);
759 writel (dmactl, &dma->dmactl);
761 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
762 (void) readl (&ep->dev->pci->pcimstctl);
764 writel ((1 << DMA_START), &dma->dmastat);
766 if (!ep->is_in)
767 stop_out_naking (ep);
770 static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
772 u32 tmp;
773 struct net2280_dma_regs __iomem *dma = ep->dma;
775 /* FIXME can't use DMA for ZLPs */
777 /* on this path we "know" there's no dma active (yet) */
778 WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
779 writel (0, &ep->dma->dmactl);
781 /* previous OUT packet might have been short */
782 if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
783 & (1 << NAK_OUT_PACKETS)) != 0) {
784 writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
785 &ep->regs->ep_stat);
787 tmp = readl (&ep->regs->ep_avail);
788 if (tmp) {
789 writel (readl (&dma->dmastat), &dma->dmastat);
791 /* transfer all/some fifo data */
792 writel (req->req.dma, &dma->dmaaddr);
793 tmp = min (tmp, req->req.length);
795 /* dma irq, faking scatterlist status */
796 req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
797 writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
798 | tmp, &dma->dmacount);
799 req->td->dmadesc = 0;
800 req->valid = 1;
802 writel ((1 << DMA_ENABLE), &dma->dmactl);
803 writel ((1 << DMA_START), &dma->dmastat);
804 return;
808 tmp = dmactl_default;
810 /* force packet boundaries between dma requests, but prevent the
811 * controller from automagically writing a last "short" packet
812 * (zero length) unless the driver explicitly said to do that.
814 if (ep->is_in) {
815 if (likely ((req->req.length % ep->ep.maxpacket) != 0
816 || req->req.zero)) {
817 tmp |= (1 << DMA_FIFO_VALIDATE);
818 ep->in_fifo_validate = 1;
819 } else
820 ep->in_fifo_validate = 0;
823 /* init req->td, pointing to the current dummy */
824 req->td->dmadesc = cpu_to_le32 (ep->td_dma);
825 fill_dma_desc (ep, req, 1);
827 if (!use_dma_chaining)
828 req->td->dmacount |= __constant_cpu_to_le32 (1 << END_OF_CHAIN);
830 start_queue (ep, tmp, req->td_dma);
833 static inline void
834 queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
836 struct net2280_dma *end;
837 dma_addr_t tmp;
839 /* swap new dummy for old, link; fill and maybe activate */
840 end = ep->dummy;
841 ep->dummy = req->td;
842 req->td = end;
844 tmp = ep->td_dma;
845 ep->td_dma = req->td_dma;
846 req->td_dma = tmp;
848 end->dmadesc = cpu_to_le32 (ep->td_dma);
850 fill_dma_desc (ep, req, valid);
853 static void
854 done (struct net2280_ep *ep, struct net2280_request *req, int status)
856 struct net2280 *dev;
857 unsigned stopped = ep->stopped;
859 list_del_init (&req->queue);
861 if (req->req.status == -EINPROGRESS)
862 req->req.status = status;
863 else
864 status = req->req.status;
866 dev = ep->dev;
867 if (req->mapped) {
868 pci_unmap_single (dev->pdev, req->req.dma, req->req.length,
869 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
870 req->req.dma = DMA_ADDR_INVALID;
871 req->mapped = 0;
874 if (status && status != -ESHUTDOWN)
875 VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
876 ep->ep.name, &req->req, status,
877 req->req.actual, req->req.length);
879 /* don't modify queue heads during completion callback */
880 ep->stopped = 1;
881 spin_unlock (&dev->lock);
882 req->req.complete (&ep->ep, &req->req);
883 spin_lock (&dev->lock);
884 ep->stopped = stopped;
887 /*-------------------------------------------------------------------------*/
889 static int
890 net2280_queue (struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
892 struct net2280_request *req;
893 struct net2280_ep *ep;
894 struct net2280 *dev;
895 unsigned long flags;
897 /* we always require a cpu-view buffer, so that we can
898 * always use pio (as fallback or whatever).
900 req = container_of (_req, struct net2280_request, req);
901 if (!_req || !_req->complete || !_req->buf
902 || !list_empty (&req->queue))
903 return -EINVAL;
904 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
905 return -EDOM;
906 ep = container_of (_ep, struct net2280_ep, ep);
907 if (!_ep || (!ep->desc && ep->num != 0))
908 return -EINVAL;
909 dev = ep->dev;
910 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
911 return -ESHUTDOWN;
913 /* FIXME implement PIO fallback for ZLPs with DMA */
914 if (ep->dma && _req->length == 0)
915 return -EOPNOTSUPP;
917 /* set up dma mapping in case the caller didn't */
918 if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
919 _req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
920 ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
921 req->mapped = 1;
924 #if 0
925 VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
926 _ep->name, _req, _req->length, _req->buf);
927 #endif
929 spin_lock_irqsave (&dev->lock, flags);
931 _req->status = -EINPROGRESS;
932 _req->actual = 0;
934 /* kickstart this i/o queue? */
935 if (list_empty (&ep->queue) && !ep->stopped) {
936 /* use DMA if the endpoint supports it, else pio */
937 if (ep->dma)
938 start_dma (ep, req);
939 else {
940 /* maybe there's no control data, just status ack */
941 if (ep->num == 0 && _req->length == 0) {
942 allow_status (ep);
943 done (ep, req, 0);
944 VDEBUG (dev, "%s status ack\n", ep->ep.name);
945 goto done;
948 /* PIO ... stuff the fifo, or unblock it. */
949 if (ep->is_in)
950 write_fifo (ep, _req);
951 else if (list_empty (&ep->queue)) {
952 u32 s;
954 /* OUT FIFO might have packet(s) buffered */
955 s = readl (&ep->regs->ep_stat);
956 if ((s & (1 << FIFO_EMPTY)) == 0) {
957 /* note: _req->short_not_ok is
958 * ignored here since PIO _always_
959 * stops queue advance here, and
960 * _req->status doesn't change for
961 * short reads (only _req->actual)
963 if (read_fifo (ep, req)) {
964 done (ep, req, 0);
965 if (ep->num == 0)
966 allow_status (ep);
967 /* don't queue it */
968 req = NULL;
969 } else
970 s = readl (&ep->regs->ep_stat);
973 /* don't NAK, let the fifo fill */
974 if (req && (s & (1 << NAK_OUT_PACKETS)))
975 writel ((1 << CLEAR_NAK_OUT_PACKETS),
976 &ep->regs->ep_rsp);
980 } else if (ep->dma) {
981 int valid = 1;
983 if (ep->is_in) {
984 int expect;
986 /* preventing magic zlps is per-engine state, not
987 * per-transfer; irq logic must recover hiccups.
989 expect = likely (req->req.zero
990 || (req->req.length % ep->ep.maxpacket) != 0);
991 if (expect != ep->in_fifo_validate)
992 valid = 0;
994 queue_dma (ep, req, valid);
996 } /* else the irq handler advances the queue. */
998 if (req)
999 list_add_tail (&req->queue, &ep->queue);
1000 done:
1001 spin_unlock_irqrestore (&dev->lock, flags);
1003 /* pci writes may still be posted */
1004 return 0;
1007 static inline void
1008 dma_done (
1009 struct net2280_ep *ep,
1010 struct net2280_request *req,
1011 u32 dmacount,
1012 int status
1015 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
1016 done (ep, req, status);
1019 static void restart_dma (struct net2280_ep *ep);
1021 static void scan_dma_completions (struct net2280_ep *ep)
1023 /* only look at descriptors that were "naturally" retired,
1024 * so fifo and list head state won't matter
1026 while (!list_empty (&ep->queue)) {
1027 struct net2280_request *req;
1028 u32 tmp;
1030 req = list_entry (ep->queue.next,
1031 struct net2280_request, queue);
1032 if (!req->valid)
1033 break;
1034 rmb ();
1035 tmp = le32_to_cpup (&req->td->dmacount);
1036 if ((tmp & (1 << VALID_BIT)) != 0)
1037 break;
1039 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1040 * cases where DMA must be aborted; this code handles
1041 * all non-abort DMA completions.
1043 if (unlikely (req->td->dmadesc == 0)) {
1044 /* paranoia */
1045 tmp = readl (&ep->dma->dmacount);
1046 if (tmp & DMA_BYTE_COUNT_MASK)
1047 break;
1048 /* single transfer mode */
1049 dma_done (ep, req, tmp, 0);
1050 break;
1051 } else if (!ep->is_in
1052 && (req->req.length % ep->ep.maxpacket) != 0) {
1053 tmp = readl (&ep->regs->ep_stat);
1055 /* AVOID TROUBLE HERE by not issuing short reads from
1056 * your gadget driver. That helps avoids errata 0121,
1057 * 0122, and 0124; not all cases trigger the warning.
1059 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
1060 WARN (ep->dev, "%s lost packet sync!\n",
1061 ep->ep.name);
1062 req->req.status = -EOVERFLOW;
1063 } else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
1064 /* fifo gets flushed later */
1065 ep->out_overflow = 1;
1066 DEBUG (ep->dev, "%s dma, discard %d len %d\n",
1067 ep->ep.name, tmp,
1068 req->req.length);
1069 req->req.status = -EOVERFLOW;
1072 dma_done (ep, req, tmp, 0);
/* Resume a stalled DMA queue on an endpoint. Without chaining this just
 * (re)starts DMA for the head request; with chaining it repairs the
 * descriptor chain after an IN "hiccup" (zlp-mode mismatch between
 * queued requests) before handing the queue back to the controller.
 * Called with the device spinlock held; no-op if the ep is stopped.
 */
1076 static void restart_dma (struct net2280_ep *ep)
1078 struct net2280_request *req;
1079 u32 dmactl = dmactl_default;
1081 if (ep->stopped)
1082 return;
1083 req = list_entry (ep->queue.next, struct net2280_request, queue);
1085 if (!use_dma_chaining) {
1086 start_dma (ep, req);
1087 return;
1090 /* the 2280 will be processing the queue unless queue hiccups after
1091 * the previous transfer:
1092 * IN: wanted automagic zlp, head doesn't (or vice versa)
1093 * DMA_FIFO_VALIDATE doesn't init from dma descriptors.
1094 * OUT: was "usb-short", we must restart.
1096 if (ep->is_in && !req->valid) {
1097 struct net2280_request *entry, *prev = NULL;
1098 int reqmode, done = 0;
1100 DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
/* zlp mode for the new head: validate the fifo when the transfer
 * is short or explicitly requests a trailing zero-length packet
 */
1101 ep->in_fifo_validate = likely (req->req.zero
1102 || (req->req.length % ep->ep.maxpacket) != 0);
1103 if (ep->in_fifo_validate)
1104 dmactl |= (1 << DMA_FIFO_VALIDATE);
/* re-mark queued descriptors: chain together requests that share the
 * head's zlp mode; the first mismatch forces the next hiccup there
 */
1105 list_for_each_entry (entry, &ep->queue, queue) {
1106 u32 dmacount;
1108 if (entry == req)
1109 continue;
1110 dmacount = entry->td->dmacount;
1111 if (!done) {
1112 reqmode = likely (entry->req.zero
1113 || (entry->req.length
1114 % ep->ep.maxpacket) != 0);
1115 if (reqmode == ep->in_fifo_validate) {
1116 entry->valid = 1;
1117 dmacount |= valid_bit;
1118 entry->td->dmacount = dmacount;
1119 prev = entry;
1120 continue;
1121 } else {
1122 /* force a hiccup */
/* NOTE(review): if the first non-head entry already mismatches,
 * prev is still NULL here — looks like a possible NULL deref;
 * presumably the head itself always matches its own mode. Verify.
 */
1123 prev->td->dmacount |= dma_done_ie;
1124 done = 1;
1128 /* walk the rest of the queue so unlinks behave */
1129 entry->valid = 0;
1130 dmacount &= ~valid_bit;
1131 entry->td->dmacount = dmacount;
1132 prev = entry;
/* restart the channel from the (repaired) head descriptor */
1136 writel (0, &ep->dma->dmactl);
1137 start_queue (ep, dmactl, req->td_dma);
/* Stop the endpoint's DMA channel: hard-abort if a transfer is in
 * flight, plain stop otherwise; then reap any completed descriptors.
 * Called with the device spinlock held.
 */
1140 static void abort_dma (struct net2280_ep *ep)
1142 /* abort the current transfer */
1143 if (likely (!list_empty (&ep->queue))) {
1144 /* FIXME work around errata 0121, 0122, 0124 */
1145 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
1146 spin_stop_dma (ep->dma);
1147 } else
1148 stop_dma (ep->dma);
1149 scan_dma_completions (ep);
1152 /* dequeue ALL requests */
/* Abort DMA (if any) and complete every queued request with
 * -ESHUTDOWN. Leaves the endpoint stopped. Spinlock held by caller.
 */
1153 static void nuke (struct net2280_ep *ep)
1155 struct net2280_request *req;
1157 /* called with spinlock held */
1158 ep->stopped = 1;
1159 if (ep->dma)
1160 abort_dma (ep);
1161 while (!list_empty (&ep->queue)) {
1162 req = list_entry (ep->queue.next,
1163 struct net2280_request,
1164 queue);
/* done() unlinks the request from ep->queue, so this terminates */
1165 done (ep, req, -ESHUTDOWN);
1169 /* dequeue JUST ONE request */
/* usb_ep_ops.dequeue: cancel one queued request. Quiesces DMA while
 * the queue is patched, completes the request with -ECONNRESET, then
 * resumes or stops DMA as appropriate. Returns 0, or -EINVAL if the
 * request isn't queued here, or -EOPNOTSUPP (see tail comment).
 */
1170 static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
1172 struct net2280_ep *ep;
1173 struct net2280_request *req;
1174 unsigned long flags;
1175 u32 dmactl;
1176 int stopped;
1178 ep = container_of (_ep, struct net2280_ep, ep);
1179 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
1180 return -EINVAL;
1182 spin_lock_irqsave (&ep->dev->lock, flags);
/* remember whether the ep was already stopped, to restore later */
1183 stopped = ep->stopped;
1185 /* quiesce dma while we patch the queue */
1186 dmactl = 0;
1187 ep->stopped = 1;
1188 if (ep->dma) {
/* save dmactl so a surviving head request can be resumed with it */
1189 dmactl = readl (&ep->dma->dmactl);
1190 /* WARNING erratum 0127 may kick in ... */
1191 stop_dma (ep->dma);
1192 scan_dma_completions (ep);
1195 /* make sure it's still queued on this endpoint */
1196 list_for_each_entry (req, &ep->queue, queue) {
1197 if (&req->req == _req)
1198 break;
1200 if (&req->req != _req) {
1201 spin_unlock_irqrestore (&ep->dev->lock, flags);
1202 return -EINVAL;
1205 /* queue head may be partially complete. */
1206 if (ep->queue.next == &req->queue) {
1207 if (ep->dma) {
1208 DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
1209 _req->status = -ECONNRESET;
1210 abort_dma (ep);
/* abort_dma may have already completed it via scan_dma_completions */
1211 if (likely (ep->queue.next == &req->queue)) {
1212 // NOTE: misreports single-transfer mode
1213 req->td->dmacount = 0; /* invalidate */
1214 dma_done (ep, req,
1215 readl (&ep->dma->dmacount),
1216 -ECONNRESET);
1218 } else {
1219 DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
1220 done (ep, req, -ECONNRESET);
/* req == NULL marks "already completed above" for the code below */
1222 req = NULL;
1224 /* patch up hardware chaining data */
1225 } else if (ep->dma && use_dma_chaining) {
/* unlinking the second entry: point hardware past it */
1226 if (req->queue.prev == ep->queue.next) {
1227 writel (le32_to_cpu (req->td->dmadesc),
1228 &ep->dma->dmadesc);
1229 if (req->td->dmacount & dma_done_ie)
1230 writel (readl (&ep->dma->dmacount)
1231 | dma_done_ie,
1232 &ep->dma->dmacount);
1233 } else {
/* mid-queue: splice the predecessor's descriptor link past req,
 * propagating the done-interrupt flag if req carried it
 */
1234 struct net2280_request *prev;
1236 prev = list_entry (req->queue.prev,
1237 struct net2280_request, queue);
1238 prev->td->dmadesc = req->td->dmadesc;
1239 if (req->td->dmacount & dma_done_ie)
1240 prev->td->dmacount |= dma_done_ie;
1244 if (req)
1245 done (ep, req, -ECONNRESET);
1246 ep->stopped = stopped;
1248 if (ep->dma) {
1249 /* turn off dma on inactive queues */
1250 if (list_empty (&ep->queue))
1251 stop_dma (ep->dma);
1252 else if (!ep->stopped) {
1253 /* resume current request, or start new one */
1254 if (req)
1255 writel (dmactl, &ep->dma->dmactl);
1256 else
1257 start_dma (ep, list_entry (ep->queue.next,
1258 struct net2280_request, queue));
1262 spin_unlock_irqrestore (&ep->dev->lock, flags);
/* NOTE(review): req is non-NULL only on the non-head unlink paths
 * here, so head unlinks return -EOPNOTSUPP — presumably intentional
 * signalling, but verify against callers.
 */
1263 return req ? 0 : -EOPNOTSUPP;
1266 /*-------------------------------------------------------------------------*/
1268 static int net2280_fifo_status (struct usb_ep *_ep);
/* usb_ep_ops.set_halt: stall (value != 0) or un-stall an endpoint.
 * Refused (-EINVAL) for iso endpoints, (-EAGAIN) while requests are
 * queued or an IN fifo still holds data, (-ESHUTDOWN) with no driver
 * or unknown speed. ep0 stalls via the protocol_stall flag instead of
 * a direct halt.
 */
1270 static int
1271 net2280_set_halt (struct usb_ep *_ep, int value)
1273 struct net2280_ep *ep;
1274 unsigned long flags;
1275 int retval = 0;
1277 ep = container_of (_ep, struct net2280_ep, ep);
1278 if (!_ep || (!ep->desc && ep->num != 0))
1279 return -EINVAL;
1280 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1281 return -ESHUTDOWN;
1282 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1283 == USB_ENDPOINT_XFER_ISOC)
1284 return -EINVAL;
1286 spin_lock_irqsave (&ep->dev->lock, flags);
1287 if (!list_empty (&ep->queue))
1288 retval = -EAGAIN;
1289 else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
1290 retval = -EAGAIN;
1291 else {
1292 VDEBUG (ep->dev, "%s %s halt\n", _ep->name,
1293 value ? "set" : "clear");
1294 /* set/clear, then synch memory views with the device */
1295 if (value) {
1296 if (ep->num == 0)
1297 ep->dev->protocol_stall = 1;
1298 else
1299 set_halt (ep);
1300 } else
1301 clear_halt (ep);
/* readback flushes the posted PCI write */
1302 (void) readl (&ep->regs->ep_rsp);
1304 spin_unlock_irqrestore (&ep->dev->lock, flags);
1306 return retval;
1309 static int
1310 net2280_fifo_status (struct usb_ep *_ep)
1312 struct net2280_ep *ep;
1313 u32 avail;
1315 ep = container_of (_ep, struct net2280_ep, ep);
1316 if (!_ep || (!ep->desc && ep->num != 0))
1317 return -ENODEV;
1318 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1319 return -ESHUTDOWN;
1321 avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
1322 if (avail > ep->fifo_size)
1323 return -EOVERFLOW;
1324 if (ep->is_in)
1325 avail = ep->fifo_size - avail;
1326 return avail;
1329 static void
1330 net2280_fifo_flush (struct usb_ep *_ep)
1332 struct net2280_ep *ep;
1334 ep = container_of (_ep, struct net2280_ep, ep);
1335 if (!_ep || (!ep->desc && ep->num != 0))
1336 return;
1337 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1338 return;
1340 writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
1341 (void) readl (&ep->regs->ep_rsp);
/* per-endpoint operations exported to the gadget layer via ep->ops */
1344 static struct usb_ep_ops net2280_ep_ops = {
1345 .enable = net2280_enable,
1346 .disable = net2280_disable,
1348 .alloc_request = net2280_alloc_request,
1349 .free_request = net2280_free_request,
1351 .alloc_buffer = net2280_alloc_buffer,
1352 .free_buffer = net2280_free_buffer,
1354 .queue = net2280_queue,
1355 .dequeue = net2280_dequeue,
1357 .set_halt = net2280_set_halt,
1358 .fifo_status = net2280_fifo_status,
1359 .fifo_flush = net2280_fifo_flush,
1362 /*-------------------------------------------------------------------------*/
1364 static int net2280_get_frame (struct usb_gadget *_gadget)
1366 struct net2280 *dev;
1367 unsigned long flags;
1368 u16 retval;
1370 if (!_gadget)
1371 return -ENODEV;
1372 dev = container_of (_gadget, struct net2280, gadget);
1373 spin_lock_irqsave (&dev->lock, flags);
1374 retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
1375 spin_unlock_irqrestore (&dev->lock, flags);
1376 return retval;
1379 static int net2280_wakeup (struct usb_gadget *_gadget)
1381 struct net2280 *dev;
1382 u32 tmp;
1383 unsigned long flags;
1385 if (!_gadget)
1386 return 0;
1387 dev = container_of (_gadget, struct net2280, gadget);
1389 spin_lock_irqsave (&dev->lock, flags);
1390 tmp = readl (&dev->usb->usbctl);
1391 if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
1392 writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
1393 spin_unlock_irqrestore (&dev->lock, flags);
1395 /* pci writes may still be posted */
1396 return 0;
1399 static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1401 struct net2280 *dev;
1402 u32 tmp;
1403 unsigned long flags;
1405 if (!_gadget)
1406 return 0;
1407 dev = container_of (_gadget, struct net2280, gadget);
1409 spin_lock_irqsave (&dev->lock, flags);
1410 tmp = readl (&dev->usb->usbctl);
1411 if (value)
1412 tmp |= (1 << SELF_POWERED_STATUS);
1413 else
1414 tmp &= ~(1 << SELF_POWERED_STATUS);
1415 writel (tmp, &dev->usb->usbctl);
1416 spin_unlock_irqrestore (&dev->lock, flags);
1418 return 0;
1421 static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1423 struct net2280 *dev;
1424 u32 tmp;
1425 unsigned long flags;
1427 if (!_gadget)
1428 return -ENODEV;
1429 dev = container_of (_gadget, struct net2280, gadget);
1431 spin_lock_irqsave (&dev->lock, flags);
1432 tmp = readl (&dev->usb->usbctl);
1433 dev->softconnect = (is_on != 0);
1434 if (is_on)
1435 tmp |= (1 << USB_DETECT_ENABLE);
1436 else
1437 tmp &= ~(1 << USB_DETECT_ENABLE);
1438 writel (tmp, &dev->usb->usbctl);
1439 spin_unlock_irqrestore (&dev->lock, flags);
1441 return 0;
/* device-level operations exported to the gadget layer */
1444 static const struct usb_gadget_ops net2280_ops = {
1445 .get_frame = net2280_get_frame,
1446 .wakeup = net2280_wakeup,
1447 .set_selfpowered = net2280_set_selfpowered,
1448 .pullup = net2280_pullup,
1451 /*-------------------------------------------------------------------------*/
1453 #ifdef USE_SYSFS_DEBUG_FILES
1455 /* "function" sysfs attribute */
/* Print the bound gadget driver's function name, or nothing when no
 * driver is bound / the name would not fit in one page.
 */
1456 static ssize_t
1457 show_function (struct device *_dev, char *buf)
1459 struct net2280 *dev = dev_get_drvdata (_dev);
1461 if (!dev->driver
1462 || !dev->driver->function
1463 || strlen (dev->driver->function) > PAGE_SIZE)
1464 return 0;
1465 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1467 static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
/* "registers" sysfs attribute: dump the controller's main, USB, and
 * per-endpoint (plus DMA) register state into one page, followed by
 * per-endpoint irq counters. Returns the number of bytes written.
 */
1469 static ssize_t
1470 show_registers (struct device *_dev, char *buf)
1472 struct net2280 *dev;
1473 char *next;
1474 unsigned size, t;
1475 unsigned long flags;
1476 int i;
1477 u32 t1, t2;
1478 char *s;
1480 dev = dev_get_drvdata (_dev);
1481 next = buf;
1482 size = PAGE_SIZE;
/* hold the lock so register state is read as one coherent snapshot */
1483 spin_lock_irqsave (&dev->lock, flags);
1485 if (dev->driver)
1486 s = dev->driver->driver.name;
1487 else
1488 s = "(none)";
1490 /* Main Control Registers */
1491 t = scnprintf (next, size, "%s version " DRIVER_VERSION
1492 ", chiprev %04x, dma %s\n\n"
1493 "devinit %03x fifoctl %08x gadget '%s'\n"
1494 "pci irqenb0 %02x irqenb1 %08x "
1495 "irqstat0 %04x irqstat1 %08x\n",
1496 driver_name, dev->chiprev,
1497 use_dma
1498 ? (use_dma_chaining ? "chaining" : "enabled")
1499 : "disabled",
1500 readl (&dev->regs->devinit),
1501 readl (&dev->regs->fifoctl),
1503 readl (&dev->regs->pciirqenb0),
1504 readl (&dev->regs->pciirqenb1),
1505 readl (&dev->regs->irqstat0),
1506 readl (&dev->regs->irqstat1));
1507 size -= t;
1508 next += t;
1510 /* USB Control Registers */
1511 t1 = readl (&dev->usb->usbctl);
1512 t2 = readl (&dev->usb->usbstat);
/* derive a human-readable link state from VBUS and speed bits */
1513 if (t1 & (1 << VBUS_PIN)) {
1514 if (t2 & (1 << HIGH_SPEED))
1515 s = "high speed";
1516 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1517 s = "powered";
1518 else
1519 s = "full speed";
1520 /* full speed bit (6) not working?? */
1521 } else
1522 s = "not attached";
1523 t = scnprintf (next, size,
1524 "stdrsp %08x usbctl %08x usbstat %08x "
1525 "addr 0x%02x (%s)\n",
1526 readl (&dev->usb->stdrsp), t1, t2,
1527 readl (&dev->usb->ouraddr), s);
1528 size -= t;
1529 next += t;
1531 /* PCI Master Control Registers */
1533 /* DMA Control Registers */
1535 /* Configurable EP Control Registers */
1536 for (i = 0; i < 7; i++) {
1537 struct net2280_ep *ep;
1539 ep = &dev->ep [i];
/* skip endpoints that aren't configured (ep0 always shown) */
1540 if (i && !ep->desc)
1541 continue;
1543 t1 = readl (&ep->regs->ep_cfg);
1544 t2 = readl (&ep->regs->ep_rsp) & 0xff;
1545 t = scnprintf (next, size,
1546 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1547 "irqenb %02x\n",
1548 ep->ep.name, t1, t2,
1549 (t2 & (1 << CLEAR_NAK_OUT_PACKETS))
1550 ? "NAK " : "",
1551 (t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
1552 ? "hide " : "",
1553 (t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
1554 ? "CRC " : "",
1555 (t2 & (1 << CLEAR_INTERRUPT_MODE))
1556 ? "interrupt " : "",
1557 (t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1558 ? "status " : "",
1559 (t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
1560 ? "NAKmode " : "",
1561 (t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
1562 ? "DATA1 " : "DATA0 ",
1563 (t2 & (1 << CLEAR_ENDPOINT_HALT))
1564 ? "HALT " : "",
1565 readl (&ep->regs->ep_irqenb));
1566 size -= t;
1567 next += t;
1569 t = scnprintf (next, size,
1570 "\tstat %08x avail %04x "
1571 "(ep%d%s-%s)%s\n",
1572 readl (&ep->regs->ep_stat),
1573 readl (&ep->regs->ep_avail),
1574 t1 & 0x0f, DIR_STRING (t1),
1575 type_string (t1 >> 8),
1576 ep->stopped ? "*" : "");
1577 size -= t;
1578 next += t;
1580 if (!ep->dma)
1581 continue;
1583 t = scnprintf (next, size,
1584 " dma\tctl %08x stat %08x count %08x\n"
1585 "\taddr %08x desc %08x\n",
1586 readl (&ep->dma->dmactl),
1587 readl (&ep->dma->dmastat),
1588 readl (&ep->dma->dmacount),
1589 readl (&ep->dma->dmaaddr),
1590 readl (&ep->dma->dmadesc));
1591 size -= t;
1592 next += t;
1596 /* Indexed Registers */
1597 // none yet
1599 /* Statistics */
1600 t = scnprintf (next, size, "\nirqs: ");
1601 size -= t;
1602 next += t;
1603 for (i = 0; i < 7; i++) {
1604 struct net2280_ep *ep;
1606 ep = &dev->ep [i];
1607 if (i && !ep->irqs)
1608 continue;
1609 t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
1610 size -= t;
1611 next += t;
1614 t = scnprintf (next, size, "\n");
1615 size -= t;
1616 next += t;
1618 spin_unlock_irqrestore (&dev->lock, flags);
1620 return PAGE_SIZE - size;
1622 static DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL);
/* "queues" sysfs attribute: for each endpoint, print its descriptor
 * summary and every queued request (with live DMA state for the request
 * the channel is currently processing). Returns bytes written; stops
 * early if the page fills up.
 */
1624 static ssize_t
1625 show_queues (struct device *_dev, char *buf)
1627 struct net2280 *dev;
1628 char *next;
1629 unsigned size;
1630 unsigned long flags;
1631 int i;
1633 dev = dev_get_drvdata (_dev);
1634 next = buf;
1635 size = PAGE_SIZE;
1636 spin_lock_irqsave (&dev->lock, flags);
1638 for (i = 0; i < 7; i++) {
1639 struct net2280_ep *ep = &dev->ep [i];
1640 struct net2280_request *req;
1641 int t;
1643 if (i != 0) {
1644 const struct usb_endpoint_descriptor *d;
1646 d = ep->desc;
1647 if (!d)
1648 continue;
1649 t = d->bEndpointAddress;
1650 t = scnprintf (next, size,
1651 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1652 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1653 (t & USB_DIR_IN) ? "in" : "out",
1654 ({ char *val;
1655 switch (d->bmAttributes & 0x03) {
1656 case USB_ENDPOINT_XFER_BULK:
1657 val = "bulk"; break;
1658 case USB_ENDPOINT_XFER_INT:
1659 val = "intr"; break;
1660 default:
1661 val = "iso"; break;
1662 }; val; }),
1663 le16_to_cpu (d->wMaxPacketSize) & 0x1fff,
1664 ep->dma ? "dma" : "pio", ep->fifo_size
1666 } else /* ep0 should only have one transfer queued */
1667 t = scnprintf (next, size, "ep0 max 64 pio %s\n",
1668 ep->is_in ? "in" : "out");
/* bail out once the page is full (or on scnprintf error) */
1669 if (t <= 0 || t > size)
1670 goto done;
1671 size -= t;
1672 next += t;
1674 if (list_empty (&ep->queue)) {
1675 t = scnprintf (next, size, "\t(nothing queued)\n");
1676 if (t <= 0 || t > size)
1677 goto done;
1678 size -= t;
1679 next += t;
1680 continue;
1682 list_for_each_entry (req, &ep->queue, queue) {
/* show live dmacount only for the descriptor DMA is working on */
1683 if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
1684 t = scnprintf (next, size,
1685 "\treq %p len %d/%d "
1686 "buf %p (dmacount %08x)\n",
1687 &req->req, req->req.actual,
1688 req->req.length, req->req.buf,
1689 readl (&ep->dma->dmacount));
1690 else
1691 t = scnprintf (next, size,
1692 "\treq %p len %d/%d buf %p\n",
1693 &req->req, req->req.actual,
1694 req->req.length, req->req.buf);
1695 if (t <= 0 || t > size)
1696 goto done;
1697 size -= t;
1698 next += t;
1700 if (ep->dma) {
1701 struct net2280_dma *td;
1703 td = req->td;
1704 t = scnprintf (next, size, "\t td %08x "
1705 " count %08x buf %08x desc %08x\n",
1706 req->td_dma, td->dmacount,
1707 td->dmaaddr, td->dmadesc);
1708 if (t <= 0 || t > size)
1709 goto done;
1710 size -= t;
1711 next += t;
1716 done:
1717 spin_unlock_irqrestore (&dev->lock, flags);
1718 return PAGE_SIZE - size;
1720 static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
1723 #else
/* no sysfs debug files: compile the create/remove calls to no-ops */
1725 #define device_create_file(a,b) do {} while (0)
1726 #define device_remove_file device_create_file
1728 #endif
1730 /*-------------------------------------------------------------------------*/
1732 /* another driver-specific mode might be a request type doing dma
1733 * to/from another device fifo instead of to/from memory.
/* Program the hardware fifo allocation (mode 0/1/2) and rebuild
 * gadget.ep_list to expose only the endpoints that exist in that
 * mode. See net2280_set_fifo_mode() kernel-doc for the mode meanings.
 * Spinlock held by caller.
 */
1736 static void set_fifo_mode (struct net2280 *dev, int mode)
1738 /* keeping high bits preserves BAR2 */
1739 writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1741 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1742 INIT_LIST_HEAD (&dev->gadget.ep_list);
1743 list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
1744 list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
1745 switch (mode) {
1746 case 0:
1747 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1748 list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
1749 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
1750 break;
1751 case 1:
1752 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
1753 break;
1754 case 2:
1755 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1756 dev->ep [1].fifo_size = 2048;
1757 dev->ep [2].fifo_size = 1024;
1758 break;
1760 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1761 list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
1762 list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
1766 * net2280_set_fifo_mode - change allocation of fifo buffers
1767 * @gadget: access to the net2280 device that will be updated
1768 * @mode: 0 for default, four 1kB buffers (ep-a through ep-d);
1769 * 1 for two 2kB buffers (ep-a and ep-b only);
1770 * 2 for one 2kB buffer (ep-a) and two 1kB ones (ep-b, ep-c).
1772 * returns zero on success, else negative errno. when this succeeds,
1773 * the contents of gadget->ep_list may have changed.
1775 * you may only call this function when endpoints a-d are all disabled.
1776 * use it whenever extra hardware buffering can help performance, such
1777 * as before enabling "high bandwidth" interrupt endpoints that use
1778 * maxpacket bigger than 512 (when double buffering would otherwise
1779 * be unavailable).
1781 int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
1783 int i;
1784 struct net2280 *dev;
1785 int status = 0;
1786 unsigned long flags;
1788 if (!gadget)
1789 return -ENODEV;
1790 dev = container_of (gadget, struct net2280, gadget);
1792 spin_lock_irqsave (&dev->lock, flags);
/* refuse while any reconfigurable endpoint (a-d) is still enabled */
1794 for (i = 1; i <= 4; i++)
1795 if (dev->ep [i].desc) {
1796 status = -EINVAL;
1797 break;
1799 if (mode < 0 || mode > 2)
1800 status = -EINVAL;
1801 if (status == 0)
1802 set_fifo_mode (dev, mode);
1803 spin_unlock_irqrestore (&dev->lock, flags);
1805 if (status == 0) {
1806 if (mode == 1)
1807 DEBUG (dev, "fifo: ep-a 2K, ep-b 2K\n");
1808 else if (mode == 2)
1809 DEBUG (dev, "fifo: ep-a 2K, ep-b 1K, ep-c 1K\n");
1810 /* else all are 1K */
1812 return status;
1814 EXPORT_SYMBOL (net2280_set_fifo_mode);
1816 /*-------------------------------------------------------------------------*/
1818 /* keeping it simple:
1819 * - one bus driver, initted first;
1820 * - one function driver, initted second
1822 * most of the work to support multiple net2280 controllers would
1823 * be to associate this gadget driver (yes?) with all of them, or
1824 * perhaps to bind specific drivers to specific devices.
1827 static struct net2280 *the_controller;
/* Put the controller into a known quiet state: mask irqs and automatic
 * responses, abort endpoint DMA, ack stale irq status, soft-reset the
 * chip, and restore the default fifo/endpoint allocation.
 * Called with the device spinlock held.
 */
1829 static void usb_reset (struct net2280 *dev)
1831 u32 tmp;
1833 dev->gadget.speed = USB_SPEED_UNKNOWN;
/* readback flushes any posted PCI writes before we start */
1834 (void) readl (&dev->usb->usbctl);
1836 net2280_led_init (dev);
1838 /* disable automatic responses, and irqs */
1839 writel (0, &dev->usb->stdrsp);
1840 writel (0, &dev->regs->pciirqenb0);
1841 writel (0, &dev->regs->pciirqenb1);
1843 /* clear old dma and irq state */
1844 for (tmp = 0; tmp < 4; tmp++) {
1845 struct net2280_ep *ep = &dev->ep [tmp + 1];
1847 if (ep->dma)
1848 abort_dma (ep);
/* ack everything except the still-interesting suspend request */
1850 writel (~0, &dev->regs->irqstat0),
1851 writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
1853 /* reset, and enable pci */
1854 tmp = readl (&dev->regs->devinit)
1855 | (1 << PCI_ENABLE)
1856 | (1 << FIFO_SOFT_RESET)
1857 | (1 << USB_SOFT_RESET)
1858 | (1 << M8051_RESET);
1859 writel (tmp, &dev->regs->devinit);
1861 /* standard fifo and endpoint allocations */
1862 set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
/* Re-initialize software endpoint state after a reset: names, fifo
 * sizes, DMA channel assignment (ep-a..ep-d only), register pointers,
 * and the ep0 bookkeeping; then lock out host access to the dedicated
 * endpoints. Called with the device spinlock held.
 */
1865 static void usb_reinit (struct net2280 *dev)
1867 u32 tmp;
1868 int init_dma;
1870 /* use_dma changes are ignored till next device re-init */
1871 init_dma = use_dma;
1873 /* basic endpoint init */
1874 for (tmp = 0; tmp < 7; tmp++) {
1875 struct net2280_ep *ep = &dev->ep [tmp];
1877 ep->ep.name = ep_name [tmp];
1878 ep->dev = dev;
1879 ep->num = tmp;
/* only ep-a..ep-d (indices 1..4) have 1KB fifos and DMA channels */
1881 if (tmp > 0 && tmp <= 4) {
1882 ep->fifo_size = 1024;
1883 if (init_dma)
1884 ep->dma = &dev->dma [tmp - 1];
1885 } else
1886 ep->fifo_size = 64;
1887 ep->regs = &dev->epregs [tmp];
1888 ep_reset (dev->regs, ep);
1890 dev->ep [0].ep.maxpacket = 64;
1891 dev->ep [5].ep.maxpacket = 64;
1892 dev->ep [6].ep.maxpacket = 64;
1894 dev->gadget.ep0 = &dev->ep [0].ep;
1895 dev->ep [0].stopped = 0;
1896 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1898 /* we want to prevent lowlevel/insecure access from the USB host,
1899 * but erratum 0119 means this enable bit is ignored
1901 for (tmp = 0; tmp < 5; tmp++)
1902 writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
/* Arm ep0 and the device-level control plumbing: clear stale ep0
 * response state, pick which standard requests the hardware answers
 * by itself, set device-level usbctl policy (honoring softconnect),
 * and enable the irqs needed for ep0 traffic and general operation.
 */
1905 static void ep0_start (struct net2280 *dev)
1907 writel ( (1 << CLEAR_EP_HIDE_STATUS_PHASE)
1908 | (1 << CLEAR_NAK_OUT_PACKETS)
1909 | (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
1910 , &dev->epregs [0].ep_rsp);
1913 * hardware optionally handles a bunch of standard requests
1914 * that the API hides from drivers anyway. have it do so.
1915 * endpoint status/features are handled in software, to
1916 * help pass tests for some dubious behavior.
1918 writel ( (1 << SET_TEST_MODE)
1919 | (1 << SET_ADDRESS)
1920 | (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
1921 | (1 << GET_DEVICE_STATUS)
1922 | (1 << GET_INTERFACE_STATUS)
1923 , &dev->usb->stdrsp);
1924 writel ( (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
1925 | (1 << SELF_POWERED_USB_DEVICE)
1926 | (1 << REMOTE_WAKEUP_SUPPORT)
1927 | (dev->softconnect << USB_DETECT_ENABLE)
1928 | (1 << SELF_POWERED_STATUS)
1929 , &dev->usb->usbctl);
1931 /* enable irqs so we can see ep0 and general operation */
1932 writel ( (1 << SETUP_PACKET_INTERRUPT_ENABLE)
1933 | (1 << ENDPOINT_0_INTERRUPT_ENABLE)
1934 , &dev->regs->pciirqenb0);
1935 writel ( (1 << PCI_INTERRUPT_ENABLE)
1936 | (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
1937 | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
1938 | (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
1939 | (1 << VBUS_INTERRUPT_ENABLE)
1940 | (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
1941 | (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
1942 , &dev->regs->pciirqenb1);
1944 /* don't leave any writes posted */
1945 (void) readl (&dev->usb->usbctl);
1948 /* when a driver is successfully registered, it will receive
1949 * control requests including set_configuration(), which enables
1950 * non-control requests. then usb traffic follows until a
1951 * disconnect is reported. then a host may connect again, or
1952 * the driver might get unbound.
/* Bind a gadget function driver to the (single) controller instance,
 * then enable host detection and ep0. Returns 0, -EINVAL for a driver
 * missing required methods or high-speed support, -ENODEV with no
 * controller probed, or -EBUSY if a driver is already bound.
 */
1954 int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1956 struct net2280 *dev = the_controller;
1957 int retval;
1958 unsigned i;
1960 /* insist on high speed support from the driver, since
1961 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
1962 * "must not be used in normal operation"
1964 if (!driver
1965 || driver->speed != USB_SPEED_HIGH
1966 || !driver->bind
1967 || !driver->unbind
1968 || !driver->setup)
1969 return -EINVAL;
1970 if (!dev)
1971 return -ENODEV;
1972 if (dev->driver)
1973 return -EBUSY;
1975 for (i = 0; i < 7; i++)
1976 dev->ep [i].irqs = 0;
1978 /* hook up the driver ... */
1979 dev->softconnect = 1;
1980 driver->driver.bus = NULL;
1981 dev->driver = driver;
1982 dev->gadget.dev.driver = &driver->driver;
1983 retval = driver->bind (&dev->gadget);
1984 if (retval) {
1985 DEBUG (dev, "bind to driver %s --> %d\n",
1986 driver->driver.name, retval);
/* undo the hookup so a later register attempt can succeed */
1987 dev->driver = NULL;
1988 dev->gadget.dev.driver = NULL;
1989 return retval;
1992 device_create_file (&dev->pdev->dev, &dev_attr_function);
1993 device_create_file (&dev->pdev->dev, &dev_attr_queues);
1995 /* ... then enable host detection and ep0; and we're ready
1996 * for set_configuration as well as eventual disconnect.
1998 net2280_led_active (dev, 1);
1999 ep0_start (dev);
2001 DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
2002 driver->driver.name,
2003 readl (&dev->usb->usbctl),
2004 readl (&dev->usb->stdrsp));
2006 /* pci writes may still be posted */
2007 return 0;
2009 EXPORT_SYMBOL (usb_gadget_register_driver);
/* Quiesce the whole device: reset hardware, fail every queued request,
 * report disconnect to the (already quiesced) gadget driver when one
 * was connected, then re-initialize endpoint software state.
 * Called with the device spinlock held; the lock is dropped around
 * the driver's disconnect() callback.
 */
2011 static void
2012 stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
2014 int i;
2016 /* don't disconnect if it's not connected */
2017 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2018 driver = NULL;
2020 /* stop hardware; prevent new request submissions;
2021 * and kill any outstanding requests.
2023 usb_reset (dev);
2024 for (i = 0; i < 7; i++)
2025 nuke (&dev->ep [i]);
2027 /* report disconnect; the driver is already quiesced */
2028 if (driver) {
2029 spin_unlock (&dev->lock);
2030 driver->disconnect (&dev->gadget);
2031 spin_lock (&dev->lock);
2034 usb_reinit (dev);
/* Unbind the current gadget function driver: stop all activity,
 * drop the pullup so the host sees a disconnect, call the driver's
 * unbind(), and remove the debug sysfs files. Returns 0, -ENODEV
 * with no controller, or -EINVAL if @driver isn't the bound one.
 */
2037 int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
2039 struct net2280 *dev = the_controller;
2040 unsigned long flags;
2042 if (!dev)
2043 return -ENODEV;
2044 if (!driver || driver != dev->driver)
2045 return -EINVAL;
2047 spin_lock_irqsave (&dev->lock, flags);
2048 stop_activity (dev, driver);
2049 spin_unlock_irqrestore (&dev->lock, flags);
2051 net2280_pullup (&dev->gadget, 0);
2053 driver->unbind (&dev->gadget);
2054 dev->gadget.dev.driver = NULL;
2055 dev->driver = NULL;
2057 net2280_led_active (dev, 0);
2058 device_remove_file (&dev->pdev->dev, &dev_attr_function);
2059 device_remove_file (&dev->pdev->dev, &dev_attr_queues);
2061 DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
2062 return 0;
2064 EXPORT_SYMBOL (usb_gadget_unregister_driver);
2067 /*-------------------------------------------------------------------------*/
2069 /* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2070 * also works for dma-capable endpoints, in pio mode or just
2071 * to manually advance the queue after short OUT transfers.
/* Per-endpoint irq worker. Acks ep_stat, runs the ep0 token state
 * machine (deferred stalls, status-phase synch), advances the DMA
 * queue after short OUT packets (erratum 0112 workaround), and
 * otherwise services pio reads/writes and request completion.
 * Called from irq context with the device spinlock held.
 */
2073 static void handle_ep_small (struct net2280_ep *ep)
2075 struct net2280_request *req;
2076 u32 t;
2077 /* 0 error, 1 mid-data, 2 done */
2078 int mode = 1;
2080 if (!list_empty (&ep->queue))
2081 req = list_entry (ep->queue.next,
2082 struct net2280_request, queue);
2083 else
2084 req = NULL;
2086 /* ack all, and handle what we care about */
2087 t = readl (&ep->regs->ep_stat);
2088 ep->irqs++;
2089 #if 0
2090 VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
2091 ep->ep.name, t, req ? &req->req : 0);
2092 #endif
/* leave NAK_OUT_PACKETS alone; everything else is write-1-to-clear */
2093 writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
2095 /* for ep0, monitor token irqs to catch data stage length errors
2096 * and to synchronize on status.
2098 * also, to defer reporting of protocol stalls ... here's where
2099 * data or status first appears, handling stalls here should never
2100 * cause trouble on the host side..
2102 * control requests could be slightly faster without token synch for
2103 * status, but status can jam up that way.
2105 if (unlikely (ep->num == 0)) {
2106 if (ep->is_in) {
2107 /* status; stop NAKing */
2108 if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
2109 if (ep->dev->protocol_stall) {
2110 ep->stopped = 1;
2111 set_halt (ep);
2113 if (!req)
2114 allow_status (ep);
2115 mode = 2;
2116 /* reply to extra IN data tokens with a zlp */
2117 } else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
2118 if (ep->dev->protocol_stall) {
2119 ep->stopped = 1;
2120 set_halt (ep);
2121 mode = 2;
2122 } else if (!req && ep->stopped)
2123 write_fifo (ep, NULL);
2125 } else {
2126 /* status; stop NAKing */
2127 if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
2128 if (ep->dev->protocol_stall) {
2129 ep->stopped = 1;
2130 set_halt (ep);
2132 mode = 2;
2133 /* an extra OUT token is an error */
2134 } else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
2135 && req
2136 && req->req.actual == req->req.length)
2137 || !req) {
2138 ep->dev->protocol_stall = 1;
2139 set_halt (ep);
2140 ep->stopped = 1;
2141 if (req)
2142 done (ep, req, -EOVERFLOW);
2143 req = NULL;
2148 if (unlikely (!req))
2149 return;
2151 /* manual DMA queue advance after short OUT */
2152 if (likely (ep->dma != 0)) {
2153 if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
2154 u32 count;
2155 int stopped = ep->stopped;
2157 /* TRANSFERRED works around OUT_DONE erratum 0112.
2158 * we expect (N <= maxpacket) bytes; host wrote M.
2159 * iff (M < N) we won't ever see a DMA interrupt.
2161 ep->stopped = 1;
/* poll until preceding DMA drains and the fifo empties */
2162 for (count = 0; ; t = readl (&ep->regs->ep_stat)) {
2164 /* any preceding dma transfers must finish.
2165 * dma handles (M >= N), may empty the queue
2167 scan_dma_completions (ep);
2168 if (unlikely (list_empty (&ep->queue)
2169 || ep->out_overflow)) {
2170 req = NULL;
2171 break;
2173 req = list_entry (ep->queue.next,
2174 struct net2280_request, queue);
2176 /* here either (M < N), a "real" short rx;
2177 * or (M == N) and the queue didn't empty
2179 if (likely (t & (1 << FIFO_EMPTY))) {
2180 count = readl (&ep->dma->dmacount);
2181 count &= DMA_BYTE_COUNT_MASK;
2182 if (readl (&ep->dma->dmadesc)
2183 != req->td_dma)
2184 req = NULL;
2185 break;
2187 udelay(1);
2190 /* stop DMA, leave ep NAKing */
2191 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
2192 spin_stop_dma (ep->dma);
2194 if (likely (req)) {
2195 req->td->dmacount = 0;
2196 t = readl (&ep->regs->ep_avail);
2197 dma_done (ep, req, count, t);
2200 /* also flush to prevent erratum 0106 trouble */
2201 if (unlikely (ep->out_overflow
2202 || (ep->dev->chiprev == 0x0100
2203 && ep->dev->gadget.speed
2204 == USB_SPEED_FULL))) {
2205 out_flush (ep);
2206 ep->out_overflow = 0;
2209 /* (re)start dma if needed, stop NAKing */
2210 ep->stopped = stopped;
2211 if (!list_empty (&ep->queue))
2212 restart_dma (ep);
2213 } else
2214 DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
2215 ep->ep.name, t);
2216 return;
2218 /* data packet(s) received (in the fifo, OUT) */
2219 } else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
2220 if (read_fifo (ep, req) && ep->num != 0)
2221 mode = 2;
2223 /* data packet(s) transmitted (IN) */
2224 } else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
2225 unsigned len;
/* account for the packet the hardware just sent */
2227 len = req->req.length - req->req.actual;
2228 if (len > ep->ep.maxpacket)
2229 len = ep->ep.maxpacket;
2230 req->req.actual += len;
2232 /* if we wrote it all, we're usually done */
2233 if (req->req.actual == req->req.length) {
2234 if (ep->num == 0) {
2235 /* wait for control status */
2236 if (mode != 2)
2237 req = NULL;
/* a full-length final packet with req.zero still needs a zlp */
2238 } else if (!req->req.zero || len != ep->ep.maxpacket)
2239 mode = 2;
2242 /* there was nothing to do ... */
2243 } else if (mode == 1)
2244 return;
2246 /* done */
2247 if (mode == 2) {
2248 /* stream endpoints often resubmit/unlink in completion */
2249 done (ep, req, 0);
2251 /* maybe advance queue to next request */
2252 if (ep->num == 0) {
2253 /* NOTE: net2280 could let gadget driver start the
2254 * status stage later. since not all controllers let
2255 * them control that, the api doesn't (yet) allow it.
2257 if (!ep->stopped)
2258 allow_status (ep);
2259 req = NULL;
2260 } else {
2261 if (!list_empty (&ep->queue) && !ep->stopped)
2262 req = list_entry (ep->queue.next,
2263 struct net2280_request, queue);
2264 else
2265 req = NULL;
2266 if (req && !ep->is_in)
2267 stop_out_naking (ep);
2271 /* is there a buffer for the next packet?
2272 * for best streaming performance, make sure there is one.
2274 if (req && !ep->stopped) {
2276 /* load IN fifo with next packet (may be zlp) */
2277 if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
2278 write_fifo (ep, &req->req);
2282 static struct net2280_ep *
2283 get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2285 struct net2280_ep *ep;
2287 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2288 return &dev->ep [0];
2289 list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
2290 u8 bEndpointAddress;
2292 if (!ep->desc)
2293 continue;
2294 bEndpointAddress = ep->desc->bEndpointAddress;
2295 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2296 continue;
2297 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2298 return ep;
2300 return NULL;
/* handle irqstat0:  setup (control request) traffic on ep0, plus the
 * per-endpoint PIO-level interrupts (low 7 bits of irqstat0).
 * called with dev->lock held, from the interrupt handler.
 */
static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~(1 << INTA_ASSERTED);
	if (!stat)
		return;
	// DEBUG (dev, "irqstat0 %04x\n", stat);

	/* starting a control request? */
	if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw [2];
			/* the eight setup bytes, viewed as a usb request */
			struct usb_ctrlrequest	r;
		} u;
		int				tmp = 0;
		struct net2280_request		*req;

		/* the first setup packet also tells us the enumerated speed */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			net2280_led_speed (dev, dev->gadget.speed);
			DEBUG (dev, "%s speed\n",
				(dev->gadget.speed == USB_SPEED_HIGH)
					? "high" : "full");
		}

		ep = &dev->ep [0];
		ep->irqs++;

		/* make sure any leftover request state is cleared */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
		while (!list_empty (&ep->queue)) {
			req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			/* requests interrupted by this new setup packet
			 * complete with -EPROTO unless fully transferred
			 */
			done (ep, req, (req->req.actual == req->req.length)
						? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		/* ack/clear all stale ep0 status bits before reading setup */
		writel (  (1 << TIMEOUT)
			| (1 << USB_STALL_SENT)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_OUT_PING_NAK_SENT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << FIFO_OVERFLOW)
			| (1 << FIFO_UNDERFLOW)
			| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
			| (1 << DATA_IN_TOKEN_INTERRUPT)
			, &ep->regs->ep_stat);
		u.raw [0] = readl (&dev->usb->setup0123);
		u.raw [1] = readl (&dev->usb->setup4567);

		cpu_to_le32s (&u.raw [0]);
		cpu_to_le32s (&u.raw [1]);

		/* multibyte setup fields arrive little-endian on the wire */
		le16_to_cpus (&u.r.wValue);
		le16_to_cpus (&u.r.wIndex);
		le16_to_cpus (&u.r.wLength);

		/* ack the irq */
		writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status stage happen.
		 * FIXME ignore tokens we'll NAK, until driver responds.
		 * that'll mean a lot less irqs for some drivers.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
				| (1 << DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking (ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
				| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
				| (1 << DATA_IN_TOKEN_INTERRUPT);
		writel (scratch, &dev->epregs [0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			u16			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0
					|| u.r.wLength > 2)
				goto do_stall;

			/* endpoint status is just the halt bit */
			if (readl (&e->regs->ep_rsp)
					& (1 << SET_ENDPOINT_HALT))
				status = __constant_cpu_to_le16 (1);
			else
				status = __constant_cpu_to_le16 (0);

			/* don't bother with a request object! */
			writel (0, &dev->epregs [0].ep_irqenb);
			set_fifo_bytecount (ep, u.r.wLength);
			writel (status, &dev->epregs [0].ep_data);
			allow_status (ep);
			VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT
					|| u.r.wLength != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
				goto do_stall;
			clear_halt (e);
			allow_status (ep);
			VDEBUG (dev, "%s clear halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT
					|| u.r.wLength != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, u.r.wIndex)) == 0)
				goto do_stall;
			set_halt (e);
			allow_status (ep);
			VDEBUG (dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			VDEBUG (dev, "setup %02x.%02x v%04x i%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				u.r.wValue, u.r.wIndex,
				readl (&ep->regs->ep_cfg));
			/* drop the lock around the gadget driver callback */
			spin_unlock (&dev->lock);
			tmp = dev->driver->setup (&dev->gadget, &u.r);
			spin_lock (&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

next_endpoints:
	/* endpoint data irq ? */
	scratch = stat & 0x7f;
	stat &= ~0x7f;
	for (num = 0; scratch; num++) {
		u32		t;

		/* do this endpoint's FIFO and queue need tending? */
		t = 1 << num;
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep [num];
		handle_ep_small (ep);
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
}
/* irqstat1 bits for the four dma channels, one per dma-capable endpoint
 * (handle_stat1_irqs maps these onto dev->ep [1..4])
 */
#define DMA_INTERRUPTS ( \
		  (1 << DMA_D_INTERRUPT) \
		| (1 << DMA_C_INTERRUPT) \
		| (1 << DMA_B_INTERRUPT) \
		| (1 << DMA_A_INTERRUPT))
/* pci bus fault conditions reported in irqstat1; treated as fatal below */
#define	PCI_ERROR_INTERRUPTS ( \
		  (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
		| (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
		| (1 << PCI_RETRY_ABORT_INTERRUPT))
/* handle irqstat1:  disconnect/reset, suspend/resume, per-channel dma
 * completion for ep-a..ep-d, and pci bus errors.
 * called with dev->lock held, from the interrupt handler.
 */
static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
	mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
				((readl (&dev->usb->usbstat) & mask) == 0))
				|| ((readl (&dev->usb->usbctl)
					& (1 << VBUS_PIN)) == 0)
			    ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			DEBUG (dev, "disconnect %s\n",
					dev->driver->driver.name);
			/* tear down active transfers, rearm for enumeration */
			stop_activity (dev, dev->driver);
			ep0_start (dev);
			return;
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		/* the "change" irq fires on both edges; the level bit
		 * tells whether this is suspend or resume
		 */
		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend (&dev->gadget);
		} else {
			if (dev->driver->resume)
				dev->driver->resume (&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel (stat, &dev->regs->irqstat1);

	/* some status we can just ignore */
	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			| (1 << SUSPEND_REQUEST_INTERRUPT)
			| (1 << RESUME_INTERRUPT)
			| (1 << SOF_INTERRUPT));
	if (!stat)
		return;
	// DEBUG (dev, "irqstat1 %08x\n", stat);

	/* DMA status, for ep-{a,b,c,d} */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	/* shift channel bits down so channel 0 lands at bit 0; it pairs
	 * with dev->ep [num + 1] below (presumably ep-a..ep-d — verify
	 * against the register definitions)
	 */
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = 1 << num;
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		ep = &dev->ep [num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl (&dma->dmastat);
		writel (tmp, &dma->dmastat);

		/* chaining should stop on abort, short OUT from fifo,
		 * or (stat0 codepath) short OUT transfer.
		 */
		if (!use_dma_chaining) {
			if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
					== 0) {
				DEBUG (ep->dev, "%s no xact done? %08x\n",
					ep->ep.name, tmp);
				continue;
			}
			stop_dma (ep->dma);
		}

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions (ep);

		/* disable dma on inactive queues; else maybe restart */
		if (list_empty (&ep->queue)) {
			if (use_dma_chaining)
				stop_dma (ep->dma);
		} else {
			tmp = readl (&dma->dmactl);
			if (!use_dma_chaining
					|| (tmp & (1 << DMA_ENABLE)) == 0)
				restart_dma (ep);
			else if (ep->is_in && use_dma_chaining) {
				struct net2280_request	*req;
				u32			dmacount;

				/* the descriptor at the head of the chain
				 * may still have VALID_BIT clear; that's
				 * used to trigger changing DMA_FIFO_VALIDATE
				 * (affects automagic zlp writes).
				 */
				req = list_entry (ep->queue.next,
						struct net2280_request, queue);
				dmacount = req->td->dmacount;
				dmacount &= __constant_cpu_to_le32 (
						(1 << VALID_BIT)
						| DMA_BYTE_COUNT_MASK);
				if (dmacount && (dmacount & valid_bit) == 0)
					restart_dma (ep);
			}
		}
		ep->irqs++;
	}

	/* NOTE:  there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ERROR (dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity (dev, dev->driver);
		ep0_start (dev);
		stat = 0;
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
}
2675 static irqreturn_t net2280_irq (int irq, void *_dev, struct pt_regs * r)
2677 struct net2280 *dev = _dev;
2679 spin_lock (&dev->lock);
2681 /* handle disconnect, dma, and more */
2682 handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
2684 /* control requests and PIO */
2685 handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
2687 spin_unlock (&dev->lock);
2689 return IRQ_HANDLED;
2692 /*-------------------------------------------------------------------------*/
/* release hook for dev->gadget.dev: the struct net2280 is stored as
 * that device's drvdata, so just free it here
 */
static void gadget_release (struct device *_dev)
{
	kfree (dev_get_drvdata (_dev));
}
2701 /* tear down the binding between this driver and the pci device */
static void net2280_remove (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	/* start with the driver above us */
	if (dev->driver) {
		/* should have been done already by driver model core */
		WARN (dev, "pci remove, driver '%s' is still registered\n",
				dev->driver->driver.name);
		usb_gadget_unregister_driver (dev->driver);
	}

	/* then clean up the resources we allocated during probe();
	 * dev->enabled/region/regs/got_irq/requests record how far probe()
	 * got, so partially-initialized devices tear down safely
	 */
	net2280_led_shutdown (dev);
	if (dev->requests) {
		int		i;
		/* free the dummy dma descriptors for ep [1..4] */
		for (i = 1; i < 5; i++) {
			if (!dev->ep [i].dummy)
				continue;
			pci_pool_free (dev->requests, dev->ep [i].dummy,
					dev->ep [i].td_dma);
		}
		pci_pool_destroy (dev->requests);
	}
	if (dev->got_irq)
		free_irq (pdev->irq, dev);
	if (dev->regs)
		iounmap (dev->regs);
	if (dev->region)
		release_mem_region (pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device (pdev);
	/* NOTE(review): device_unregister() runs unconditionally, even on
	 * probe() failure paths where device_register() never ran --
	 * confirm this can't be reached with an unregistered gadget.dev.
	 * device_unregister() also calls gadget_release(), freeing dev.
	 */
	device_unregister (&dev->gadget.dev);
	device_remove_file (&pdev->dev, &dev_attr_registers);
	pci_set_drvdata (pdev, NULL);

	INFO (dev, "unbind\n");

	the_controller = NULL;
}
2745 /* wrap this driver around the specified device, but
2746 * don't respond over USB until a gadget driver binds to us.
2749 static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
2751 struct net2280 *dev;
2752 unsigned long resource, len;
2753 void __iomem *base = NULL;
2754 int retval, i;
2755 char buf [8], *bufp;
2757 /* if you want to support more than one controller in a system,
2758 * usb_gadget_driver_{register,unregister}() must change.
2760 if (the_controller) {
2761 dev_warn (&pdev->dev, "ignoring\n");
2762 return -EBUSY;
2765 /* alloc, and start init */
2766 dev = kmalloc (sizeof *dev, SLAB_KERNEL);
2767 if (dev == NULL){
2768 retval = -ENOMEM;
2769 goto done;
2772 memset (dev, 0, sizeof *dev);
2773 spin_lock_init (&dev->lock);
2774 dev->pdev = pdev;
2775 dev->gadget.ops = &net2280_ops;
2776 dev->gadget.is_dualspeed = 1;
2778 /* the "gadget" abstracts/virtualizes the controller */
2779 strcpy (dev->gadget.dev.bus_id, "gadget");
2780 dev->gadget.dev.parent = &pdev->dev;
2781 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2782 dev->gadget.dev.release = gadget_release;
2783 dev->gadget.name = driver_name;
2785 /* now all the pci goodies ... */
2786 if (pci_enable_device (pdev) < 0) {
2787 retval = -ENODEV;
2788 goto done;
2790 dev->enabled = 1;
2792 /* BAR 0 holds all the registers
2793 * BAR 1 is 8051 memory; unused here (note erratum 0103)
2794 * BAR 2 is fifo memory; unused here
2796 resource = pci_resource_start (pdev, 0);
2797 len = pci_resource_len (pdev, 0);
2798 if (!request_mem_region (resource, len, driver_name)) {
2799 DEBUG (dev, "controller already in use\n");
2800 retval = -EBUSY;
2801 goto done;
2803 dev->region = 1;
2805 base = ioremap_nocache (resource, len);
2806 if (base == NULL) {
2807 DEBUG (dev, "can't map memory\n");
2808 retval = -EFAULT;
2809 goto done;
2811 dev->regs = (struct net2280_regs __iomem *) base;
2812 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
2813 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
2814 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
2815 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
2816 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
2818 /* put into initial config, link up all endpoints */
2819 writel (0, &dev->usb->usbctl);
2820 usb_reset (dev);
2821 usb_reinit (dev);
2823 /* irq setup after old hardware is cleaned up */
2824 if (!pdev->irq) {
2825 ERROR (dev, "No IRQ. Check PCI setup!\n");
2826 retval = -ENODEV;
2827 goto done;
2829 #ifndef __sparc__
2830 scnprintf (buf, sizeof buf, "%d", pdev->irq);
2831 bufp = buf;
2832 #else
2833 bufp = __irq_itoa(pdev->irq);
2834 #endif
2835 if (request_irq (pdev->irq, net2280_irq, SA_SHIRQ, driver_name, dev)
2836 != 0) {
2837 ERROR (dev, "request interrupt %s failed\n", bufp);
2838 retval = -EBUSY;
2839 goto done;
2841 dev->got_irq = 1;
2843 /* DMA setup */
2844 dev->requests = pci_pool_create ("requests", pdev,
2845 sizeof (struct net2280_dma),
2846 0 /* no alignment requirements */,
2847 0 /* or page-crossing issues */);
2848 if (!dev->requests) {
2849 DEBUG (dev, "can't get request pool\n");
2850 retval = -ENOMEM;
2851 goto done;
2853 for (i = 1; i < 5; i++) {
2854 struct net2280_dma *td;
2856 td = pci_pool_alloc (dev->requests, GFP_KERNEL,
2857 &dev->ep [i].td_dma);
2858 if (!td) {
2859 DEBUG (dev, "can't get dummy %d\n", i);
2860 retval = -ENOMEM;
2861 goto done;
2863 td->dmacount = 0; /* not VALID */
2864 td->dmaaddr = __constant_cpu_to_le32 (DMA_ADDR_INVALID);
2865 td->dmadesc = td->dmaaddr;
2866 dev->ep [i].dummy = td;
2869 /* enable lower-overhead pci memory bursts during DMA */
2870 writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
2871 // 256 write retries may not be enough...
2872 // | (1 << PCI_RETRY_ABORT_ENABLE)
2873 | (1 << DMA_READ_MULTIPLE_ENABLE)
2874 | (1 << DMA_READ_LINE_ENABLE)
2875 , &dev->pci->pcimstctl);
2876 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
2877 pci_set_master (pdev);
2878 pci_set_mwi (pdev);
2880 /* ... also flushes any posted pci writes */
2881 dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
2883 /* done */
2884 pci_set_drvdata (pdev, dev);
2885 INFO (dev, "%s\n", driver_desc);
2886 INFO (dev, "irq %s, pci mem %p, chip rev %04x\n",
2887 bufp, base, dev->chiprev);
2888 INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
2889 use_dma
2890 ? (use_dma_chaining ? "chaining" : "enabled")
2891 : "disabled");
2892 the_controller = dev;
2894 device_register (&dev->gadget.dev);
2895 device_create_file (&pdev->dev, &dev_attr_registers);
2897 return 0;
2899 done:
2900 if (dev)
2901 net2280_remove (pdev);
2902 return retval;
2906 /*-------------------------------------------------------------------------*/
/* match by class as well as vendor/device id, so only a 2280 configured
 * as a usb peripheral (serial-usb class, prog-if 0xfe) binds here
 */
static struct pci_device_id pci_ids [] = { {
	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = 	~0,
	.vendor =	0x17cc,		/* NetChip (per driver header) */
	.device =	0x2280,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
2920 /* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,

	/* FIXME add power management support */
};

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
static int __init init (void)
{
	/* dma chaining without dma itself makes no sense; force it off */
	if (!use_dma)
		use_dma_chaining = 0;
	return pci_module_init (&net2280_pci_driver);
}
module_init (init);
static void __exit cleanup (void)
{
	/* per-device teardown happens via net2280_remove() */
	pci_unregister_driver (&net2280_pci_driver);
}
module_exit (cleanup);