/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)					      \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)

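/*
 * Worked example (illustrative, not from the original source): if the BUFSZ
 * field extracted from epctrl reads 4 and GR_EPCTRL_BUFSZ_SCALER is 8 (the
 * actual scaler is defined in gr_udc.h and depends on the core), then
 * GR_BUFFER_SIZE(epctrl) evaluates to 4 * 8 = 32 bytes of hardware buffer.
 */
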
/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";

	return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}

#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
	seq_printf(seq, "  stopped = %d\n", ep->stopped);
	seq_printf(seq, "  wedged = %d\n", ep->wedged);
	seq_printf(seq, "  callback = %d\n", ep->callback);
	seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, "  nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

	seq_printf(seq, "  Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, "  Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, "  Queue: empty\n\n");
		return;
	}

	seq_puts(seq, "  Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, "    %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}
	seq_puts(seq, "\n");
}

static int gr_seq_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}

static int gr_dfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gr_seq_show, inode->i_private);
}

static const struct file_operations gr_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= gr_dfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";

	dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
	dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
					     &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
	/* Handles NULL and ERR pointers internally */
	debugfs_remove(dev->dfs_state);
	debugfs_remove(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
		req->req.actual = req->req.length;
	} else if (req->oddlen && req->req.actual > req->evenlen) {
		/*
		 * Copy to user buffer in this case where length was not evenly
		 * divisible by ep->ep.maxpacket and the last descriptor was
		 * actually used.
		 */
		char *buftail = ((char *)req->req.buf + req->evenlen);

		memcpy(buftail, ep->tailbuf, req->oddlen);

		if (req->req.actual > req->req.length) {
			/* We got more data than was requested */
			dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
				ep->ep.name);
			gr_dbgprint_request("OVFL", ep, req);
			req->req.status = -EOVERFLOW;
		}
	}

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		usb_gadget_giveback_request(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * ep->ep.maxpacket. It could lead to buffer overruns if an unexpectedly
	 * long packet is received. Therefore an internal bounce buffer gets
	 * used when such a request gets enabled.
	 */
	if (!ep->is_in && req->oddlen)
		req->last_desc->data = ep->tailbuf_paddr;

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}

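/*
 * Illustration of the descriptor chain built by repeated gr_add_dma_desc
 * calls (added here for clarity; not part of the original source):
 *
 *   req->first_desc -> desc0 -> desc1 -> ... -> descN <- req->last_desc
 *
 * Each descriptor links to its successor twice: next_desc holds the CPU
 * pointer followed by the driver, while next holds the DMA address (paddr)
 * that the hardware follows.
 */
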
/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		if (size < ep->bytes_per_buffer) {
			/* Prepare using bounce buffer */
			req->evenlen = req->req.length - bytes_left;
			req->oddlen = size;
		}

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}

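/*
 * Worked example (illustrative, values assumed): an OUT request of 2300 bytes
 * with ep->bytes_per_buffer == 1024 yields descriptors of 1024, 1024 and 252
 * bytes. Since the last size is below bytes_per_buffer, evenlen becomes 2048
 * and oddlen 252, and gr_start_dma later redirects the last descriptor to the
 * bounce buffer (ep->tailbuf); gr_finish_request copies the tail back into
 * the user buffer.
 */
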
/*
 * Sets up a chain of struct gr_dma_descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is even
	 * multiples of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}

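/*
 * Worked example (illustrative, values assumed): an IN request of 2048 bytes
 * with ep->ep.maxpacket == 512 and ep->bytes_per_buffer == 1024 produces two
 * 1024-byte descriptors. If req->req.zero is set, 2048 % 512 == 0 triggers an
 * extra zero-length descriptor so the host sees the transfer end. Only the
 * last descriptor gets GR_DESC_IN_CTRL_PI and thus raises an interrupt.
 */
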
/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_reset(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes
 * the new address to the control register which is updated internally
 * when the next IN packet is ACKED.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */

	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OK:s it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}

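/*
 * Summary of the ep0 state transitions driven by the code above (added for
 * clarity; derived from the calls to gr_set_ep0state in this file):
 *
 *   SETUP --(data stage, IN)---> IDATA --> ISTATUS --> SETUP
 *   SETUP --(data stage, OUT)--> ODATA --> OSTATUS --> SETUP
 *   SETUP --(no data stage)----------------------------> SETUP
 *
 * STALL is entered via gr_control_stall() and left on the next setup packet;
 * SUSPEND is handled separately in gr_handle_state_changes().
 */
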
/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
		/* Short packet or >= expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* VBUS invalid detected */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}

/* Non-interrupt context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

/* ---------------------------------------------------------------------- */
/* USB-ep operations */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
	max = 0x7ff & usb_endpoint_maxp(desc);
	nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
	buffer_size = GR_BUFFER_SIZE(epctrl);
	if (nt && (mode == 0 || mode == 2)) {
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		dev_err(dev->dev,
			"Invalid value 0x3 for additional trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	} else if (max > ep->ep.maxpacket_limit) {
		dev_err(dev->dev, "Requested max payload %d > limit %d\n",
			max, ep->ep.maxpacket_limit);
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the OUT
		 * direction.
		 */
		ep->bytes_per_buffer = max;
	}

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}

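/*
 * Worked example (illustrative, values assumed): a high-bandwidth isochronous
 * IN endpoint with usb_endpoint_maxp(desc) == 0x1400 gives max = 0x400 (1024
 * bytes) and nt = 2 (two additional transactions per microframe), so
 * ep->bytes_per_buffer = (2 + 1) * 1024 = 3072, provided the hardware buffer
 * reported by GR_BUFFER_SIZE(epctrl) is at least that large.
 */
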
/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);

	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}

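/*
 * Illustrative device tree snippet (an assumption based on the property reads
 * above, not taken from the original source or bindings): per-endpoint buffer
 * sizes can be listed as u32 arrays, e.g.
 *
 *   epobufsizes = <1024 1024 512>;
 *   epibufsizes = <1024 1024 512>;
 *
 * Endpoints without an entry fall back to the 1024 byte default used above.
 */
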
static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}

static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}

static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	} else {
		dev->irqi = 0;
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	spin_lock(&dev->lock);

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	retval = gr_udc_init(dev);
	if (retval)
		goto out;

	gr_dfs_create(dev);

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	spin_unlock(&dev->lock);

	if (retval)
		gr_remove(pdev);

	return retval;
}

static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");