/*
 * Driver for the Atmel USBA high speed USB device controller
 *
 * Copyright (C) 2005-2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 #include <linux/clk.h>
11 #include <linux/clk/at91_pmc.h>
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/list.h>
20 #include <linux/platform_device.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <linux/usb/atmel_usba_udc.h>
24 #include <linux/delay.h>
25 #include <linux/platform_data/atmel.h>
27 #include <linux/of_gpio.h>
31 #include "atmel_usba_udc.h"
33 #ifdef CONFIG_USB_GADGET_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/uaccess.h>
37 static int queue_dbg_open(struct inode
*inode
, struct file
*file
)
39 struct usba_ep
*ep
= inode
->i_private
;
40 struct usba_request
*req
, *req_copy
;
41 struct list_head
*queue_data
;
43 queue_data
= kmalloc(sizeof(*queue_data
), GFP_KERNEL
);
46 INIT_LIST_HEAD(queue_data
);
48 spin_lock_irq(&ep
->udc
->lock
);
49 list_for_each_entry(req
, &ep
->queue
, queue
) {
50 req_copy
= kmemdup(req
, sizeof(*req_copy
), GFP_ATOMIC
);
53 list_add_tail(&req_copy
->queue
, queue_data
);
55 spin_unlock_irq(&ep
->udc
->lock
);
57 file
->private_data
= queue_data
;
61 spin_unlock_irq(&ep
->udc
->lock
);
62 list_for_each_entry_safe(req
, req_copy
, queue_data
, queue
) {
63 list_del(&req
->queue
);
71 * bbbbbbbb llllllll IZS sssss nnnn FDL\n\0
75 * I/i: interrupt/no interrupt
77 * S/s: short ok/short not ok
80 * F/f: submitted/not submitted to FIFO
81 * D/d: using/not using DMA
82 * L/l: last transaction/not last transaction
84 static ssize_t
queue_dbg_read(struct file
*file
, char __user
*buf
,
85 size_t nbytes
, loff_t
*ppos
)
87 struct list_head
*queue
= file
->private_data
;
88 struct usba_request
*req
, *tmp_req
;
89 size_t len
, remaining
, actual
= 0;
92 if (!access_ok(VERIFY_WRITE
, buf
, nbytes
))
95 mutex_lock(&file_inode(file
)->i_mutex
);
96 list_for_each_entry_safe(req
, tmp_req
, queue
, queue
) {
97 len
= snprintf(tmpbuf
, sizeof(tmpbuf
),
98 "%8p %08x %c%c%c %5d %c%c%c\n",
99 req
->req
.buf
, req
->req
.length
,
100 req
->req
.no_interrupt
? 'i' : 'I',
101 req
->req
.zero
? 'Z' : 'z',
102 req
->req
.short_not_ok
? 's' : 'S',
104 req
->submitted
? 'F' : 'f',
105 req
->using_dma
? 'D' : 'd',
106 req
->last_transaction
? 'L' : 'l');
107 len
= min(len
, sizeof(tmpbuf
));
111 list_del(&req
->queue
);
114 remaining
= __copy_to_user(buf
, tmpbuf
, len
);
115 actual
+= len
- remaining
;
122 mutex_unlock(&file_inode(file
)->i_mutex
);
127 static int queue_dbg_release(struct inode
*inode
, struct file
*file
)
129 struct list_head
*queue_data
= file
->private_data
;
130 struct usba_request
*req
, *tmp_req
;
132 list_for_each_entry_safe(req
, tmp_req
, queue_data
, queue
) {
133 list_del(&req
->queue
);
140 static int regs_dbg_open(struct inode
*inode
, struct file
*file
)
142 struct usba_udc
*udc
;
147 mutex_lock(&inode
->i_mutex
);
148 udc
= inode
->i_private
;
149 data
= kmalloc(inode
->i_size
, GFP_KERNEL
);
153 spin_lock_irq(&udc
->lock
);
154 for (i
= 0; i
< inode
->i_size
/ 4; i
++)
155 data
[i
] = usba_io_readl(udc
->regs
+ i
* 4);
156 spin_unlock_irq(&udc
->lock
);
158 file
->private_data
= data
;
162 mutex_unlock(&inode
->i_mutex
);
167 static ssize_t
regs_dbg_read(struct file
*file
, char __user
*buf
,
168 size_t nbytes
, loff_t
*ppos
)
170 struct inode
*inode
= file_inode(file
);
173 mutex_lock(&inode
->i_mutex
);
174 ret
= simple_read_from_buffer(buf
, nbytes
, ppos
,
176 file_inode(file
)->i_size
);
177 mutex_unlock(&inode
->i_mutex
);
182 static int regs_dbg_release(struct inode
*inode
, struct file
*file
)
184 kfree(file
->private_data
);
188 const struct file_operations queue_dbg_fops
= {
189 .owner
= THIS_MODULE
,
190 .open
= queue_dbg_open
,
192 .read
= queue_dbg_read
,
193 .release
= queue_dbg_release
,
196 const struct file_operations regs_dbg_fops
= {
197 .owner
= THIS_MODULE
,
198 .open
= regs_dbg_open
,
199 .llseek
= generic_file_llseek
,
200 .read
= regs_dbg_read
,
201 .release
= regs_dbg_release
,
204 static void usba_ep_init_debugfs(struct usba_udc
*udc
,
207 struct dentry
*ep_root
;
209 ep_root
= debugfs_create_dir(ep
->ep
.name
, udc
->debugfs_root
);
212 ep
->debugfs_dir
= ep_root
;
214 ep
->debugfs_queue
= debugfs_create_file("queue", 0400, ep_root
,
215 ep
, &queue_dbg_fops
);
216 if (!ep
->debugfs_queue
)
220 ep
->debugfs_dma_status
221 = debugfs_create_u32("dma_status", 0400, ep_root
,
222 &ep
->last_dma_status
);
223 if (!ep
->debugfs_dma_status
)
226 if (ep_is_control(ep
)) {
228 = debugfs_create_u32("state", 0400, ep_root
,
230 if (!ep
->debugfs_state
)
238 debugfs_remove(ep
->debugfs_dma_status
);
240 debugfs_remove(ep
->debugfs_queue
);
242 debugfs_remove(ep_root
);
244 dev_err(&ep
->udc
->pdev
->dev
,
245 "failed to create debugfs directory for %s\n", ep
->ep
.name
);
248 static void usba_ep_cleanup_debugfs(struct usba_ep
*ep
)
250 debugfs_remove(ep
->debugfs_queue
);
251 debugfs_remove(ep
->debugfs_dma_status
);
252 debugfs_remove(ep
->debugfs_state
);
253 debugfs_remove(ep
->debugfs_dir
);
254 ep
->debugfs_dma_status
= NULL
;
255 ep
->debugfs_dir
= NULL
;
258 static void usba_init_debugfs(struct usba_udc
*udc
)
260 struct dentry
*root
, *regs
;
261 struct resource
*regs_resource
;
263 root
= debugfs_create_dir(udc
->gadget
.name
, NULL
);
264 if (IS_ERR(root
) || !root
)
266 udc
->debugfs_root
= root
;
268 regs_resource
= platform_get_resource(udc
->pdev
, IORESOURCE_MEM
,
272 regs
= debugfs_create_file_size("regs", 0400, root
, udc
,
274 resource_size(regs_resource
));
277 udc
->debugfs_regs
= regs
;
280 usba_ep_init_debugfs(udc
, to_usba_ep(udc
->gadget
.ep0
));
285 debugfs_remove(root
);
287 udc
->debugfs_root
= NULL
;
288 dev_err(&udc
->pdev
->dev
, "debugfs is not available\n");
291 static void usba_cleanup_debugfs(struct usba_udc
*udc
)
293 usba_ep_cleanup_debugfs(to_usba_ep(udc
->gadget
.ep0
));
294 debugfs_remove(udc
->debugfs_regs
);
295 debugfs_remove(udc
->debugfs_root
);
296 udc
->debugfs_regs
= NULL
;
297 udc
->debugfs_root
= NULL
;
300 static inline void usba_ep_init_debugfs(struct usba_udc
*udc
,
306 static inline void usba_ep_cleanup_debugfs(struct usba_ep
*ep
)
311 static inline void usba_init_debugfs(struct usba_udc
*udc
)
316 static inline void usba_cleanup_debugfs(struct usba_udc
*udc
)
322 static inline u32
usba_int_enb_get(struct usba_udc
*udc
)
324 return udc
->int_enb_cache
;
327 static inline void usba_int_enb_set(struct usba_udc
*udc
, u32 val
)
329 usba_writel(udc
, INT_ENB
, val
);
330 udc
->int_enb_cache
= val
;
333 static int vbus_is_present(struct usba_udc
*udc
)
335 if (gpio_is_valid(udc
->vbus_pin
))
336 return gpio_get_value(udc
->vbus_pin
) ^ udc
->vbus_pin_inverted
;
338 /* No Vbus detection: Assume always present */
342 static void toggle_bias(struct usba_udc
*udc
, int is_on
)
344 if (udc
->errata
&& udc
->errata
->toggle_bias
)
345 udc
->errata
->toggle_bias(udc
, is_on
);
348 static void generate_bias_pulse(struct usba_udc
*udc
)
350 if (!udc
->bias_pulse_needed
)
353 if (udc
->errata
&& udc
->errata
->pulse_bias
)
354 udc
->errata
->pulse_bias(udc
);
356 udc
->bias_pulse_needed
= false;
359 static void next_fifo_transaction(struct usba_ep
*ep
, struct usba_request
*req
)
361 unsigned int transaction_len
;
363 transaction_len
= req
->req
.length
- req
->req
.actual
;
364 req
->last_transaction
= 1;
365 if (transaction_len
> ep
->ep
.maxpacket
) {
366 transaction_len
= ep
->ep
.maxpacket
;
367 req
->last_transaction
= 0;
368 } else if (transaction_len
== ep
->ep
.maxpacket
&& req
->req
.zero
)
369 req
->last_transaction
= 0;
371 DBG(DBG_QUEUE
, "%s: submit_transaction, req %p (length %d)%s\n",
372 ep
->ep
.name
, req
, transaction_len
,
373 req
->last_transaction
? ", done" : "");
375 memcpy_toio(ep
->fifo
, req
->req
.buf
+ req
->req
.actual
, transaction_len
);
376 usba_ep_writel(ep
, SET_STA
, USBA_TX_PK_RDY
);
377 req
->req
.actual
+= transaction_len
;
380 static void submit_request(struct usba_ep
*ep
, struct usba_request
*req
)
382 DBG(DBG_QUEUE
, "%s: submit_request: req %p (length %d)\n",
383 ep
->ep
.name
, req
, req
->req
.length
);
388 if (req
->using_dma
) {
389 if (req
->req
.length
== 0) {
390 usba_ep_writel(ep
, CTL_ENB
, USBA_TX_PK_RDY
);
395 usba_ep_writel(ep
, CTL_ENB
, USBA_SHORT_PACKET
);
397 usba_ep_writel(ep
, CTL_DIS
, USBA_SHORT_PACKET
);
399 usba_dma_writel(ep
, ADDRESS
, req
->req
.dma
);
400 usba_dma_writel(ep
, CONTROL
, req
->ctrl
);
402 next_fifo_transaction(ep
, req
);
403 if (req
->last_transaction
) {
404 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_PK_RDY
);
405 usba_ep_writel(ep
, CTL_ENB
, USBA_TX_COMPLETE
);
407 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_COMPLETE
);
408 usba_ep_writel(ep
, CTL_ENB
, USBA_TX_PK_RDY
);
413 static void submit_next_request(struct usba_ep
*ep
)
415 struct usba_request
*req
;
417 if (list_empty(&ep
->queue
)) {
418 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_PK_RDY
| USBA_RX_BK_RDY
);
422 req
= list_entry(ep
->queue
.next
, struct usba_request
, queue
);
424 submit_request(ep
, req
);
427 static void send_status(struct usba_udc
*udc
, struct usba_ep
*ep
)
429 ep
->state
= STATUS_STAGE_IN
;
430 usba_ep_writel(ep
, SET_STA
, USBA_TX_PK_RDY
);
431 usba_ep_writel(ep
, CTL_ENB
, USBA_TX_COMPLETE
);
434 static void receive_data(struct usba_ep
*ep
)
436 struct usba_udc
*udc
= ep
->udc
;
437 struct usba_request
*req
;
438 unsigned long status
;
439 unsigned int bytecount
, nr_busy
;
442 status
= usba_ep_readl(ep
, STA
);
443 nr_busy
= USBA_BFEXT(BUSY_BANKS
, status
);
445 DBG(DBG_QUEUE
, "receive data: nr_busy=%u\n", nr_busy
);
447 while (nr_busy
> 0) {
448 if (list_empty(&ep
->queue
)) {
449 usba_ep_writel(ep
, CTL_DIS
, USBA_RX_BK_RDY
);
452 req
= list_entry(ep
->queue
.next
,
453 struct usba_request
, queue
);
455 bytecount
= USBA_BFEXT(BYTE_COUNT
, status
);
457 if (status
& (1 << 31))
459 if (req
->req
.actual
+ bytecount
>= req
->req
.length
) {
461 bytecount
= req
->req
.length
- req
->req
.actual
;
464 memcpy_fromio(req
->req
.buf
+ req
->req
.actual
,
465 ep
->fifo
, bytecount
);
466 req
->req
.actual
+= bytecount
;
468 usba_ep_writel(ep
, CLR_STA
, USBA_RX_BK_RDY
);
471 DBG(DBG_QUEUE
, "%s: request done\n", ep
->ep
.name
);
473 list_del_init(&req
->queue
);
474 usba_ep_writel(ep
, CTL_DIS
, USBA_RX_BK_RDY
);
475 spin_unlock(&udc
->lock
);
476 usb_gadget_giveback_request(&ep
->ep
, &req
->req
);
477 spin_lock(&udc
->lock
);
480 status
= usba_ep_readl(ep
, STA
);
481 nr_busy
= USBA_BFEXT(BUSY_BANKS
, status
);
483 if (is_complete
&& ep_is_control(ep
)) {
484 send_status(udc
, ep
);
491 request_complete(struct usba_ep
*ep
, struct usba_request
*req
, int status
)
493 struct usba_udc
*udc
= ep
->udc
;
495 WARN_ON(!list_empty(&req
->queue
));
497 if (req
->req
.status
== -EINPROGRESS
)
498 req
->req
.status
= status
;
501 usb_gadget_unmap_request(&udc
->gadget
, &req
->req
, ep
->is_in
);
503 DBG(DBG_GADGET
| DBG_REQ
,
504 "%s: req %p complete: status %d, actual %u\n",
505 ep
->ep
.name
, req
, req
->req
.status
, req
->req
.actual
);
507 spin_unlock(&udc
->lock
);
508 usb_gadget_giveback_request(&ep
->ep
, &req
->req
);
509 spin_lock(&udc
->lock
);
513 request_complete_list(struct usba_ep
*ep
, struct list_head
*list
, int status
)
515 struct usba_request
*req
, *tmp_req
;
517 list_for_each_entry_safe(req
, tmp_req
, list
, queue
) {
518 list_del_init(&req
->queue
);
519 request_complete(ep
, req
, status
);
524 usba_ep_enable(struct usb_ep
*_ep
, const struct usb_endpoint_descriptor
*desc
)
526 struct usba_ep
*ep
= to_usba_ep(_ep
);
527 struct usba_udc
*udc
= ep
->udc
;
528 unsigned long flags
, ept_cfg
, maxpacket
;
529 unsigned int nr_trans
;
531 DBG(DBG_GADGET
, "%s: ep_enable: desc=%p\n", ep
->ep
.name
, desc
);
533 maxpacket
= usb_endpoint_maxp(desc
) & 0x7ff;
535 if (((desc
->bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
) != ep
->index
)
537 || desc
->bDescriptorType
!= USB_DT_ENDPOINT
539 || maxpacket
> ep
->fifo_size
) {
540 DBG(DBG_ERR
, "ep_enable: Invalid argument");
548 ept_cfg
= USBA_BF(EPT_SIZE
, USBA_EPT_SIZE_8
);
550 /* LSB is bit 1, not 0 */
551 ept_cfg
= USBA_BF(EPT_SIZE
, fls(maxpacket
- 1) - 3);
553 DBG(DBG_HW
, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
554 ep
->ep
.name
, ept_cfg
, maxpacket
);
556 if (usb_endpoint_dir_in(desc
)) {
558 ept_cfg
|= USBA_EPT_DIR_IN
;
561 switch (usb_endpoint_type(desc
)) {
562 case USB_ENDPOINT_XFER_CONTROL
:
563 ept_cfg
|= USBA_BF(EPT_TYPE
, USBA_EPT_TYPE_CONTROL
);
564 ept_cfg
|= USBA_BF(BK_NUMBER
, USBA_BK_NUMBER_ONE
);
566 case USB_ENDPOINT_XFER_ISOC
:
568 DBG(DBG_ERR
, "ep_enable: %s is not isoc capable\n",
574 * Bits 11:12 specify number of _additional_
575 * transactions per microframe.
577 nr_trans
= ((usb_endpoint_maxp(desc
) >> 11) & 3) + 1;
582 ept_cfg
|= USBA_BF(EPT_TYPE
, USBA_EPT_TYPE_ISO
);
585 * Do triple-buffering on high-bandwidth iso endpoints.
587 if (nr_trans
> 1 && ep
->nr_banks
== 3)
588 ept_cfg
|= USBA_BF(BK_NUMBER
, USBA_BK_NUMBER_TRIPLE
);
590 ept_cfg
|= USBA_BF(BK_NUMBER
, USBA_BK_NUMBER_DOUBLE
);
591 ept_cfg
|= USBA_BF(NB_TRANS
, nr_trans
);
593 case USB_ENDPOINT_XFER_BULK
:
594 ept_cfg
|= USBA_BF(EPT_TYPE
, USBA_EPT_TYPE_BULK
);
595 ept_cfg
|= USBA_BF(BK_NUMBER
, USBA_BK_NUMBER_DOUBLE
);
597 case USB_ENDPOINT_XFER_INT
:
598 ept_cfg
|= USBA_BF(EPT_TYPE
, USBA_EPT_TYPE_INT
);
599 ept_cfg
|= USBA_BF(BK_NUMBER
, USBA_BK_NUMBER_DOUBLE
);
603 spin_lock_irqsave(&ep
->udc
->lock
, flags
);
606 ep
->ep
.maxpacket
= maxpacket
;
608 usba_ep_writel(ep
, CFG
, ept_cfg
);
609 usba_ep_writel(ep
, CTL_ENB
, USBA_EPT_ENABLE
);
614 usba_int_enb_set(udc
, usba_int_enb_get(udc
) |
615 USBA_BF(EPT_INT
, 1 << ep
->index
) |
616 USBA_BF(DMA_INT
, 1 << ep
->index
));
617 ctrl
= USBA_AUTO_VALID
| USBA_INTDIS_DMA
;
618 usba_ep_writel(ep
, CTL_ENB
, ctrl
);
620 usba_int_enb_set(udc
, usba_int_enb_get(udc
) |
621 USBA_BF(EPT_INT
, 1 << ep
->index
));
624 spin_unlock_irqrestore(&udc
->lock
, flags
);
626 DBG(DBG_HW
, "EPT_CFG%d after init: %#08lx\n", ep
->index
,
627 (unsigned long)usba_ep_readl(ep
, CFG
));
628 DBG(DBG_HW
, "INT_ENB after init: %#08lx\n",
629 (unsigned long)usba_int_enb_get(udc
));
634 static int usba_ep_disable(struct usb_ep
*_ep
)
636 struct usba_ep
*ep
= to_usba_ep(_ep
);
637 struct usba_udc
*udc
= ep
->udc
;
641 DBG(DBG_GADGET
, "ep_disable: %s\n", ep
->ep
.name
);
643 spin_lock_irqsave(&udc
->lock
, flags
);
646 spin_unlock_irqrestore(&udc
->lock
, flags
);
647 /* REVISIT because this driver disables endpoints in
648 * reset_all_endpoints() before calling disconnect(),
649 * most gadget drivers would trigger this non-error ...
651 if (udc
->gadget
.speed
!= USB_SPEED_UNKNOWN
)
652 DBG(DBG_ERR
, "ep_disable: %s not enabled\n",
658 list_splice_init(&ep
->queue
, &req_list
);
660 usba_dma_writel(ep
, CONTROL
, 0);
661 usba_dma_writel(ep
, ADDRESS
, 0);
662 usba_dma_readl(ep
, STATUS
);
664 usba_ep_writel(ep
, CTL_DIS
, USBA_EPT_ENABLE
);
665 usba_int_enb_set(udc
, usba_int_enb_get(udc
) &
666 ~USBA_BF(EPT_INT
, 1 << ep
->index
));
668 request_complete_list(ep
, &req_list
, -ESHUTDOWN
);
670 spin_unlock_irqrestore(&udc
->lock
, flags
);
675 static struct usb_request
*
676 usba_ep_alloc_request(struct usb_ep
*_ep
, gfp_t gfp_flags
)
678 struct usba_request
*req
;
680 DBG(DBG_GADGET
, "ep_alloc_request: %p, 0x%x\n", _ep
, gfp_flags
);
682 req
= kzalloc(sizeof(*req
), gfp_flags
);
686 INIT_LIST_HEAD(&req
->queue
);
692 usba_ep_free_request(struct usb_ep
*_ep
, struct usb_request
*_req
)
694 struct usba_request
*req
= to_usba_req(_req
);
696 DBG(DBG_GADGET
, "ep_free_request: %p, %p\n", _ep
, _req
);
701 static int queue_dma(struct usba_udc
*udc
, struct usba_ep
*ep
,
702 struct usba_request
*req
, gfp_t gfp_flags
)
707 DBG(DBG_DMA
, "%s: req l/%u d/%08x %c%c%c\n",
708 ep
->ep
.name
, req
->req
.length
, req
->req
.dma
,
709 req
->req
.zero
? 'Z' : 'z',
710 req
->req
.short_not_ok
? 'S' : 's',
711 req
->req
.no_interrupt
? 'I' : 'i');
713 if (req
->req
.length
> 0x10000) {
714 /* Lengths from 0 to 65536 (inclusive) are supported */
715 DBG(DBG_ERR
, "invalid request length %u\n", req
->req
.length
);
719 ret
= usb_gadget_map_request(&udc
->gadget
, &req
->req
, ep
->is_in
);
724 req
->ctrl
= USBA_BF(DMA_BUF_LEN
, req
->req
.length
)
725 | USBA_DMA_CH_EN
| USBA_DMA_END_BUF_IE
726 | USBA_DMA_END_BUF_EN
;
729 req
->ctrl
|= USBA_DMA_END_TR_EN
| USBA_DMA_END_TR_IE
;
732 * Add this request to the queue and submit for DMA if
733 * possible. Check if we're still alive first -- we may have
734 * received a reset since last time we checked.
737 spin_lock_irqsave(&udc
->lock
, flags
);
739 if (list_empty(&ep
->queue
))
740 submit_request(ep
, req
);
742 list_add_tail(&req
->queue
, &ep
->queue
);
745 spin_unlock_irqrestore(&udc
->lock
, flags
);
751 usba_ep_queue(struct usb_ep
*_ep
, struct usb_request
*_req
, gfp_t gfp_flags
)
753 struct usba_request
*req
= to_usba_req(_req
);
754 struct usba_ep
*ep
= to_usba_ep(_ep
);
755 struct usba_udc
*udc
= ep
->udc
;
759 DBG(DBG_GADGET
| DBG_QUEUE
| DBG_REQ
, "%s: queue req %p, len %u\n",
760 ep
->ep
.name
, req
, _req
->length
);
762 if (!udc
->driver
|| udc
->gadget
.speed
== USB_SPEED_UNKNOWN
||
768 req
->last_transaction
= 0;
770 _req
->status
= -EINPROGRESS
;
774 return queue_dma(udc
, ep
, req
, gfp_flags
);
776 /* May have received a reset since last time we checked */
778 spin_lock_irqsave(&udc
->lock
, flags
);
780 list_add_tail(&req
->queue
, &ep
->queue
);
782 if ((!ep_is_control(ep
) && ep
->is_in
) ||
784 && (ep
->state
== DATA_STAGE_IN
785 || ep
->state
== STATUS_STAGE_IN
)))
786 usba_ep_writel(ep
, CTL_ENB
, USBA_TX_PK_RDY
);
788 usba_ep_writel(ep
, CTL_ENB
, USBA_RX_BK_RDY
);
791 spin_unlock_irqrestore(&udc
->lock
, flags
);
797 usba_update_req(struct usba_ep
*ep
, struct usba_request
*req
, u32 status
)
799 req
->req
.actual
= req
->req
.length
- USBA_BFEXT(DMA_BUF_LEN
, status
);
802 static int stop_dma(struct usba_ep
*ep
, u32
*pstatus
)
804 unsigned int timeout
;
808 * Stop the DMA controller. When writing both CH_EN
809 * and LINK to 0, the other bits are not affected.
811 usba_dma_writel(ep
, CONTROL
, 0);
813 /* Wait for the FIFO to empty */
814 for (timeout
= 40; timeout
; --timeout
) {
815 status
= usba_dma_readl(ep
, STATUS
);
816 if (!(status
& USBA_DMA_CH_EN
))
825 dev_err(&ep
->udc
->pdev
->dev
,
826 "%s: timed out waiting for DMA FIFO to empty\n",
834 static int usba_ep_dequeue(struct usb_ep
*_ep
, struct usb_request
*_req
)
836 struct usba_ep
*ep
= to_usba_ep(_ep
);
837 struct usba_udc
*udc
= ep
->udc
;
838 struct usba_request
*req
;
842 DBG(DBG_GADGET
| DBG_QUEUE
, "ep_dequeue: %s, req %p\n",
845 spin_lock_irqsave(&udc
->lock
, flags
);
847 list_for_each_entry(req
, &ep
->queue
, queue
) {
848 if (&req
->req
== _req
)
852 if (&req
->req
!= _req
) {
853 spin_unlock_irqrestore(&udc
->lock
, flags
);
857 if (req
->using_dma
) {
859 * If this request is currently being transferred,
860 * stop the DMA controller and reset the FIFO.
862 if (ep
->queue
.next
== &req
->queue
) {
863 status
= usba_dma_readl(ep
, STATUS
);
864 if (status
& USBA_DMA_CH_EN
)
865 stop_dma(ep
, &status
);
867 #ifdef CONFIG_USB_GADGET_DEBUG_FS
868 ep
->last_dma_status
= status
;
871 usba_writel(udc
, EPT_RST
, 1 << ep
->index
);
873 usba_update_req(ep
, req
, status
);
878 * Errors should stop the queue from advancing until the
879 * completion function returns.
881 list_del_init(&req
->queue
);
883 request_complete(ep
, req
, -ECONNRESET
);
885 /* Process the next request if any */
886 submit_next_request(ep
);
887 spin_unlock_irqrestore(&udc
->lock
, flags
);
892 static int usba_ep_set_halt(struct usb_ep
*_ep
, int value
)
894 struct usba_ep
*ep
= to_usba_ep(_ep
);
895 struct usba_udc
*udc
= ep
->udc
;
899 DBG(DBG_GADGET
, "endpoint %s: %s HALT\n", ep
->ep
.name
,
900 value
? "set" : "clear");
903 DBG(DBG_ERR
, "Attempted to halt uninitialized ep %s\n",
908 DBG(DBG_ERR
, "Attempted to halt isochronous ep %s\n",
913 spin_lock_irqsave(&udc
->lock
, flags
);
916 * We can't halt IN endpoints while there are still data to be
919 if (!list_empty(&ep
->queue
)
920 || ((value
&& ep
->is_in
&& (usba_ep_readl(ep
, STA
)
921 & USBA_BF(BUSY_BANKS
, -1L))))) {
925 usba_ep_writel(ep
, SET_STA
, USBA_FORCE_STALL
);
927 usba_ep_writel(ep
, CLR_STA
,
928 USBA_FORCE_STALL
| USBA_TOGGLE_CLR
);
929 usba_ep_readl(ep
, STA
);
932 spin_unlock_irqrestore(&udc
->lock
, flags
);
937 static int usba_ep_fifo_status(struct usb_ep
*_ep
)
939 struct usba_ep
*ep
= to_usba_ep(_ep
);
941 return USBA_BFEXT(BYTE_COUNT
, usba_ep_readl(ep
, STA
));
944 static void usba_ep_fifo_flush(struct usb_ep
*_ep
)
946 struct usba_ep
*ep
= to_usba_ep(_ep
);
947 struct usba_udc
*udc
= ep
->udc
;
949 usba_writel(udc
, EPT_RST
, 1 << ep
->index
);
952 static const struct usb_ep_ops usba_ep_ops
= {
953 .enable
= usba_ep_enable
,
954 .disable
= usba_ep_disable
,
955 .alloc_request
= usba_ep_alloc_request
,
956 .free_request
= usba_ep_free_request
,
957 .queue
= usba_ep_queue
,
958 .dequeue
= usba_ep_dequeue
,
959 .set_halt
= usba_ep_set_halt
,
960 .fifo_status
= usba_ep_fifo_status
,
961 .fifo_flush
= usba_ep_fifo_flush
,
964 static int usba_udc_get_frame(struct usb_gadget
*gadget
)
966 struct usba_udc
*udc
= to_usba_udc(gadget
);
968 return USBA_BFEXT(FRAME_NUMBER
, usba_readl(udc
, FNUM
));
971 static int usba_udc_wakeup(struct usb_gadget
*gadget
)
973 struct usba_udc
*udc
= to_usba_udc(gadget
);
978 spin_lock_irqsave(&udc
->lock
, flags
);
979 if (udc
->devstatus
& (1 << USB_DEVICE_REMOTE_WAKEUP
)) {
980 ctrl
= usba_readl(udc
, CTRL
);
981 usba_writel(udc
, CTRL
, ctrl
| USBA_REMOTE_WAKE_UP
);
984 spin_unlock_irqrestore(&udc
->lock
, flags
);
990 usba_udc_set_selfpowered(struct usb_gadget
*gadget
, int is_selfpowered
)
992 struct usba_udc
*udc
= to_usba_udc(gadget
);
995 gadget
->is_selfpowered
= (is_selfpowered
!= 0);
996 spin_lock_irqsave(&udc
->lock
, flags
);
998 udc
->devstatus
|= 1 << USB_DEVICE_SELF_POWERED
;
1000 udc
->devstatus
&= ~(1 << USB_DEVICE_SELF_POWERED
);
1001 spin_unlock_irqrestore(&udc
->lock
, flags
);
1006 static int atmel_usba_start(struct usb_gadget
*gadget
,
1007 struct usb_gadget_driver
*driver
);
1008 static int atmel_usba_stop(struct usb_gadget
*gadget
);
1010 static const struct usb_gadget_ops usba_udc_ops
= {
1011 .get_frame
= usba_udc_get_frame
,
1012 .wakeup
= usba_udc_wakeup
,
1013 .set_selfpowered
= usba_udc_set_selfpowered
,
1014 .udc_start
= atmel_usba_start
,
1015 .udc_stop
= atmel_usba_stop
,
1018 static struct usb_endpoint_descriptor usba_ep0_desc
= {
1019 .bLength
= USB_DT_ENDPOINT_SIZE
,
1020 .bDescriptorType
= USB_DT_ENDPOINT
,
1021 .bEndpointAddress
= 0,
1022 .bmAttributes
= USB_ENDPOINT_XFER_CONTROL
,
1023 .wMaxPacketSize
= cpu_to_le16(64),
1024 /* FIXME: I have no idea what to put here */
1028 static struct usb_gadget usba_gadget_template
= {
1029 .ops
= &usba_udc_ops
,
1030 .max_speed
= USB_SPEED_HIGH
,
1031 .name
= "atmel_usba_udc",
1035 * Called with interrupts disabled and udc->lock held.
1037 static void reset_all_endpoints(struct usba_udc
*udc
)
1040 struct usba_request
*req
, *tmp_req
;
1042 usba_writel(udc
, EPT_RST
, ~0UL);
1044 ep
= to_usba_ep(udc
->gadget
.ep0
);
1045 list_for_each_entry_safe(req
, tmp_req
, &ep
->queue
, queue
) {
1046 list_del_init(&req
->queue
);
1047 request_complete(ep
, req
, -ECONNRESET
);
1050 /* NOTE: normally, the next call to the gadget driver is in
1051 * charge of disabling endpoints... usually disconnect().
1052 * The exception would be entering a high speed test mode.
1054 * FIXME remove this code ... and retest thoroughly.
1056 list_for_each_entry(ep
, &udc
->gadget
.ep_list
, ep
.ep_list
) {
1058 spin_unlock(&udc
->lock
);
1059 usba_ep_disable(&ep
->ep
);
1060 spin_lock(&udc
->lock
);
1065 static struct usba_ep
*get_ep_by_addr(struct usba_udc
*udc
, u16 wIndex
)
1069 if ((wIndex
& USB_ENDPOINT_NUMBER_MASK
) == 0)
1070 return to_usba_ep(udc
->gadget
.ep0
);
1072 list_for_each_entry (ep
, &udc
->gadget
.ep_list
, ep
.ep_list
) {
1073 u8 bEndpointAddress
;
1077 bEndpointAddress
= ep
->ep
.desc
->bEndpointAddress
;
1078 if ((wIndex
^ bEndpointAddress
) & USB_DIR_IN
)
1080 if ((bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
)
1081 == (wIndex
& USB_ENDPOINT_NUMBER_MASK
))
1088 /* Called with interrupts disabled and udc->lock held */
1089 static inline void set_protocol_stall(struct usba_udc
*udc
, struct usba_ep
*ep
)
1091 usba_ep_writel(ep
, SET_STA
, USBA_FORCE_STALL
);
1092 ep
->state
= WAIT_FOR_SETUP
;
1095 static inline int is_stalled(struct usba_udc
*udc
, struct usba_ep
*ep
)
1097 if (usba_ep_readl(ep
, STA
) & USBA_FORCE_STALL
)
1102 static inline void set_address(struct usba_udc
*udc
, unsigned int addr
)
1106 DBG(DBG_BUS
, "setting address %u...\n", addr
);
1107 regval
= usba_readl(udc
, CTRL
);
1108 regval
= USBA_BFINS(DEV_ADDR
, addr
, regval
);
1109 usba_writel(udc
, CTRL
, regval
);
1112 static int do_test_mode(struct usba_udc
*udc
)
1114 static const char test_packet_buffer
[] = {
1116 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1118 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1120 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1121 /* JJJJJJJKKKKKKK * 8 */
1122 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1123 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1125 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1126 /* {JKKKKKKK * 10}, JK */
1127 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
1130 struct device
*dev
= &udc
->pdev
->dev
;
1133 test_mode
= udc
->test_mode
;
1135 /* Start from a clean slate */
1136 reset_all_endpoints(udc
);
1138 switch (test_mode
) {
1141 usba_writel(udc
, TST
, USBA_TST_J_MODE
);
1142 dev_info(dev
, "Entering Test_J mode...\n");
1146 usba_writel(udc
, TST
, USBA_TST_K_MODE
);
1147 dev_info(dev
, "Entering Test_K mode...\n");
1151 * Test_SE0_NAK: Force high-speed mode and set up ep0
1152 * for Bulk IN transfers
1154 ep
= &udc
->usba_ep
[0];
1155 usba_writel(udc
, TST
,
1156 USBA_BF(SPEED_CFG
, USBA_SPEED_CFG_FORCE_HIGH
));
1157 usba_ep_writel(ep
, CFG
,
1158 USBA_BF(EPT_SIZE
, USBA_EPT_SIZE_64
)
1160 | USBA_BF(EPT_TYPE
, USBA_EPT_TYPE_BULK
)
1161 | USBA_BF(BK_NUMBER
, 1));
1162 if (!(usba_ep_readl(ep
, CFG
) & USBA_EPT_MAPPED
)) {
1163 set_protocol_stall(udc
, ep
);
1164 dev_err(dev
, "Test_SE0_NAK: ep0 not mapped\n");
1166 usba_ep_writel(ep
, CTL_ENB
, USBA_EPT_ENABLE
);
1167 dev_info(dev
, "Entering Test_SE0_NAK mode...\n");
1172 ep
= &udc
->usba_ep
[0];
1173 usba_ep_writel(ep
, CFG
,
1174 USBA_BF(EPT_SIZE
, USBA_EPT_SIZE_64
)
1176 | USBA_BF(EPT_TYPE
, USBA_EPT_TYPE_BULK
)
1177 | USBA_BF(BK_NUMBER
, 1));
1178 if (!(usba_ep_readl(ep
, CFG
) & USBA_EPT_MAPPED
)) {
1179 set_protocol_stall(udc
, ep
);
1180 dev_err(dev
, "Test_Packet: ep0 not mapped\n");
1182 usba_ep_writel(ep
, CTL_ENB
, USBA_EPT_ENABLE
);
1183 usba_writel(udc
, TST
, USBA_TST_PKT_MODE
);
1184 memcpy_toio(ep
->fifo
, test_packet_buffer
,
1185 sizeof(test_packet_buffer
));
1186 usba_ep_writel(ep
, SET_STA
, USBA_TX_PK_RDY
);
1187 dev_info(dev
, "Entering Test_Packet mode...\n");
1191 dev_err(dev
, "Invalid test mode: 0x%04x\n", test_mode
);
1198 /* Avoid overly long expressions */
1199 static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest
*crq
)
1201 if (crq
->wValue
== cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP
))
1206 static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest
*crq
)
1208 if (crq
->wValue
== cpu_to_le16(USB_DEVICE_TEST_MODE
))
1213 static inline bool feature_is_ep_halt(struct usb_ctrlrequest
*crq
)
1215 if (crq
->wValue
== cpu_to_le16(USB_ENDPOINT_HALT
))
1220 static int handle_ep0_setup(struct usba_udc
*udc
, struct usba_ep
*ep
,
1221 struct usb_ctrlrequest
*crq
)
1225 switch (crq
->bRequest
) {
1226 case USB_REQ_GET_STATUS
: {
1229 if (crq
->bRequestType
== (USB_DIR_IN
| USB_RECIP_DEVICE
)) {
1230 status
= cpu_to_le16(udc
->devstatus
);
1231 } else if (crq
->bRequestType
1232 == (USB_DIR_IN
| USB_RECIP_INTERFACE
)) {
1233 status
= cpu_to_le16(0);
1234 } else if (crq
->bRequestType
1235 == (USB_DIR_IN
| USB_RECIP_ENDPOINT
)) {
1236 struct usba_ep
*target
;
1238 target
= get_ep_by_addr(udc
, le16_to_cpu(crq
->wIndex
));
1243 if (is_stalled(udc
, target
))
1244 status
|= cpu_to_le16(1);
1248 /* Write directly to the FIFO. No queueing is done. */
1249 if (crq
->wLength
!= cpu_to_le16(sizeof(status
)))
1251 ep
->state
= DATA_STAGE_IN
;
1252 usba_io_writew(status
, ep
->fifo
);
1253 usba_ep_writel(ep
, SET_STA
, USBA_TX_PK_RDY
);
1257 case USB_REQ_CLEAR_FEATURE
: {
1258 if (crq
->bRequestType
== USB_RECIP_DEVICE
) {
1259 if (feature_is_dev_remote_wakeup(crq
))
1261 &= ~(1 << USB_DEVICE_REMOTE_WAKEUP
);
1263 /* Can't CLEAR_FEATURE TEST_MODE */
1265 } else if (crq
->bRequestType
== USB_RECIP_ENDPOINT
) {
1266 struct usba_ep
*target
;
1268 if (crq
->wLength
!= cpu_to_le16(0)
1269 || !feature_is_ep_halt(crq
))
1271 target
= get_ep_by_addr(udc
, le16_to_cpu(crq
->wIndex
));
1275 usba_ep_writel(target
, CLR_STA
, USBA_FORCE_STALL
);
1276 if (target
->index
!= 0)
1277 usba_ep_writel(target
, CLR_STA
,
1283 send_status(udc
, ep
);
1287 case USB_REQ_SET_FEATURE
: {
1288 if (crq
->bRequestType
== USB_RECIP_DEVICE
) {
1289 if (feature_is_dev_test_mode(crq
)) {
1290 send_status(udc
, ep
);
1291 ep
->state
= STATUS_STAGE_TEST
;
1292 udc
->test_mode
= le16_to_cpu(crq
->wIndex
);
1294 } else if (feature_is_dev_remote_wakeup(crq
)) {
1295 udc
->devstatus
|= 1 << USB_DEVICE_REMOTE_WAKEUP
;
1299 } else if (crq
->bRequestType
== USB_RECIP_ENDPOINT
) {
1300 struct usba_ep
*target
;
1302 if (crq
->wLength
!= cpu_to_le16(0)
1303 || !feature_is_ep_halt(crq
))
1306 target
= get_ep_by_addr(udc
, le16_to_cpu(crq
->wIndex
));
1310 usba_ep_writel(target
, SET_STA
, USBA_FORCE_STALL
);
1314 send_status(udc
, ep
);
1318 case USB_REQ_SET_ADDRESS
:
1319 if (crq
->bRequestType
!= (USB_DIR_OUT
| USB_RECIP_DEVICE
))
1322 set_address(udc
, le16_to_cpu(crq
->wValue
));
1323 send_status(udc
, ep
);
1324 ep
->state
= STATUS_STAGE_ADDR
;
1329 spin_unlock(&udc
->lock
);
1330 retval
= udc
->driver
->setup(&udc
->gadget
, crq
);
1331 spin_lock(&udc
->lock
);
1337 pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
1338 "halting endpoint...\n",
1339 ep
->ep
.name
, crq
->bRequestType
, crq
->bRequest
,
1340 le16_to_cpu(crq
->wValue
), le16_to_cpu(crq
->wIndex
),
1341 le16_to_cpu(crq
->wLength
));
1342 set_protocol_stall(udc
, ep
);
1346 static void usba_control_irq(struct usba_udc
*udc
, struct usba_ep
*ep
)
1348 struct usba_request
*req
;
1353 epstatus
= usba_ep_readl(ep
, STA
);
1354 epctrl
= usba_ep_readl(ep
, CTL
);
1356 DBG(DBG_INT
, "%s [%d]: s/%08x c/%08x\n",
1357 ep
->ep
.name
, ep
->state
, epstatus
, epctrl
);
1360 if (!list_empty(&ep
->queue
))
1361 req
= list_entry(ep
->queue
.next
,
1362 struct usba_request
, queue
);
1364 if ((epctrl
& USBA_TX_PK_RDY
) && !(epstatus
& USBA_TX_PK_RDY
)) {
1366 next_fifo_transaction(ep
, req
);
1368 submit_request(ep
, req
);
1370 if (req
->last_transaction
) {
1371 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_PK_RDY
);
1372 usba_ep_writel(ep
, CTL_ENB
, USBA_TX_COMPLETE
);
1376 if ((epstatus
& epctrl
) & USBA_TX_COMPLETE
) {
1377 usba_ep_writel(ep
, CLR_STA
, USBA_TX_COMPLETE
);
1379 switch (ep
->state
) {
1381 usba_ep_writel(ep
, CTL_ENB
, USBA_RX_BK_RDY
);
1382 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_COMPLETE
);
1383 ep
->state
= STATUS_STAGE_OUT
;
1385 case STATUS_STAGE_ADDR
:
1386 /* Activate our new address */
1387 usba_writel(udc
, CTRL
, (usba_readl(udc
, CTRL
)
1389 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_COMPLETE
);
1390 ep
->state
= WAIT_FOR_SETUP
;
1392 case STATUS_STAGE_IN
:
1394 list_del_init(&req
->queue
);
1395 request_complete(ep
, req
, 0);
1396 submit_next_request(ep
);
1398 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_COMPLETE
);
1399 ep
->state
= WAIT_FOR_SETUP
;
1401 case STATUS_STAGE_TEST
:
1402 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_COMPLETE
);
1403 ep
->state
= WAIT_FOR_SETUP
;
1404 if (do_test_mode(udc
))
1405 set_protocol_stall(udc
, ep
);
1408 pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
1409 "halting endpoint...\n",
1410 ep
->ep
.name
, ep
->state
);
1411 set_protocol_stall(udc
, ep
);
1417 if ((epstatus
& epctrl
) & USBA_RX_BK_RDY
) {
1418 switch (ep
->state
) {
1419 case STATUS_STAGE_OUT
:
1420 usba_ep_writel(ep
, CLR_STA
, USBA_RX_BK_RDY
);
1421 usba_ep_writel(ep
, CTL_DIS
, USBA_RX_BK_RDY
);
1424 list_del_init(&req
->queue
);
1425 request_complete(ep
, req
, 0);
1427 ep
->state
= WAIT_FOR_SETUP
;
1430 case DATA_STAGE_OUT
:
1435 usba_ep_writel(ep
, CLR_STA
, USBA_RX_BK_RDY
);
1436 usba_ep_writel(ep
, CTL_DIS
, USBA_RX_BK_RDY
);
1437 pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
1438 "halting endpoint...\n",
1439 ep
->ep
.name
, ep
->state
);
1440 set_protocol_stall(udc
, ep
);
1446 if (epstatus
& USBA_RX_SETUP
) {
1448 struct usb_ctrlrequest crq
;
1449 unsigned long data
[2];
1451 unsigned int pkt_len
;
1454 if (ep
->state
!= WAIT_FOR_SETUP
) {
1456 * Didn't expect a SETUP packet at this
1457 * point. Clean up any pending requests (which
1458 * may be successful).
1460 int status
= -EPROTO
;
1463 * RXRDY and TXCOMP are dropped when SETUP
1464 * packets arrive. Just pretend we received
1465 * the status packet.
1467 if (ep
->state
== STATUS_STAGE_OUT
1468 || ep
->state
== STATUS_STAGE_IN
) {
1469 usba_ep_writel(ep
, CTL_DIS
, USBA_RX_BK_RDY
);
1474 list_del_init(&req
->queue
);
1475 request_complete(ep
, req
, status
);
1479 pkt_len
= USBA_BFEXT(BYTE_COUNT
, usba_ep_readl(ep
, STA
));
1480 DBG(DBG_HW
, "Packet length: %u\n", pkt_len
);
1481 if (pkt_len
!= sizeof(crq
)) {
1482 pr_warning("udc: Invalid packet length %u "
1483 "(expected %zu)\n", pkt_len
, sizeof(crq
));
1484 set_protocol_stall(udc
, ep
);
1488 DBG(DBG_FIFO
, "Copying ctrl request from 0x%p:\n", ep
->fifo
);
1489 memcpy_fromio(crq
.data
, ep
->fifo
, sizeof(crq
));
1491 /* Free up one bank in the FIFO so that we can
1492 * generate or receive a reply right away. */
1493 usba_ep_writel(ep
, CLR_STA
, USBA_RX_SETUP
);
1495 /* printk(KERN_DEBUG "setup: %d: %02x.%02x\n",
1496 ep->state, crq.crq.bRequestType,
1497 crq.crq.bRequest); */
1499 if (crq
.crq
.bRequestType
& USB_DIR_IN
) {
1501 * The USB 2.0 spec states that "if wLength is
1502 * zero, there is no data transfer phase."
1503 * However, testusb #14 seems to actually
1504 * expect a data phase even if wLength = 0...
1506 ep
->state
= DATA_STAGE_IN
;
1508 if (crq
.crq
.wLength
!= cpu_to_le16(0))
1509 ep
->state
= DATA_STAGE_OUT
;
1511 ep
->state
= STATUS_STAGE_IN
;
1516 ret
= handle_ep0_setup(udc
, ep
, &crq
.crq
);
1518 spin_unlock(&udc
->lock
);
1519 ret
= udc
->driver
->setup(&udc
->gadget
, &crq
.crq
);
1520 spin_lock(&udc
->lock
);
1523 DBG(DBG_BUS
, "req %02x.%02x, length %d, state %d, ret %d\n",
1524 crq
.crq
.bRequestType
, crq
.crq
.bRequest
,
1525 le16_to_cpu(crq
.crq
.wLength
), ep
->state
, ret
);
1528 /* Let the host know that we failed */
1529 set_protocol_stall(udc
, ep
);
1534 static void usba_ep_irq(struct usba_udc
*udc
, struct usba_ep
*ep
)
1536 struct usba_request
*req
;
1540 epstatus
= usba_ep_readl(ep
, STA
);
1541 epctrl
= usba_ep_readl(ep
, CTL
);
1543 DBG(DBG_INT
, "%s: interrupt, status: 0x%08x\n", ep
->ep
.name
, epstatus
);
1545 while ((epctrl
& USBA_TX_PK_RDY
) && !(epstatus
& USBA_TX_PK_RDY
)) {
1546 DBG(DBG_BUS
, "%s: TX PK ready\n", ep
->ep
.name
);
1548 if (list_empty(&ep
->queue
)) {
1549 dev_warn(&udc
->pdev
->dev
, "ep_irq: queue empty\n");
1550 usba_ep_writel(ep
, CTL_DIS
, USBA_TX_PK_RDY
);
1554 req
= list_entry(ep
->queue
.next
, struct usba_request
, queue
);
1556 if (req
->using_dma
) {
1557 /* Send a zero-length packet */
1558 usba_ep_writel(ep
, SET_STA
,
1560 usba_ep_writel(ep
, CTL_DIS
,
1562 list_del_init(&req
->queue
);
1563 submit_next_request(ep
);
1564 request_complete(ep
, req
, 0);
1567 next_fifo_transaction(ep
, req
);
1569 submit_request(ep
, req
);
1571 if (req
->last_transaction
) {
1572 list_del_init(&req
->queue
);
1573 submit_next_request(ep
);
1574 request_complete(ep
, req
, 0);
1578 epstatus
= usba_ep_readl(ep
, STA
);
1579 epctrl
= usba_ep_readl(ep
, CTL
);
1581 if ((epstatus
& epctrl
) & USBA_RX_BK_RDY
) {
1582 DBG(DBG_BUS
, "%s: RX data ready\n", ep
->ep
.name
);
1587 static void usba_dma_irq(struct usba_udc
*udc
, struct usba_ep
*ep
)
1589 struct usba_request
*req
;
1590 u32 status
, control
, pending
;
1592 status
= usba_dma_readl(ep
, STATUS
);
1593 control
= usba_dma_readl(ep
, CONTROL
);
1594 #ifdef CONFIG_USB_GADGET_DEBUG_FS
1595 ep
->last_dma_status
= status
;
1597 pending
= status
& control
;
1598 DBG(DBG_INT
| DBG_DMA
, "dma irq, s/%#08x, c/%#08x\n", status
, control
);
1600 if (status
& USBA_DMA_CH_EN
) {
1601 dev_err(&udc
->pdev
->dev
,
1602 "DMA_CH_EN is set after transfer is finished!\n");
1603 dev_err(&udc
->pdev
->dev
,
1604 "status=%#08x, pending=%#08x, control=%#08x\n",
1605 status
, pending
, control
);
1608 * try to pretend nothing happened. We might have to
1609 * do something here...
1613 if (list_empty(&ep
->queue
))
1614 /* Might happen if a reset comes along at the right moment */
1617 if (pending
& (USBA_DMA_END_TR_ST
| USBA_DMA_END_BUF_ST
)) {
1618 req
= list_entry(ep
->queue
.next
, struct usba_request
, queue
);
1619 usba_update_req(ep
, req
, status
);
1621 list_del_init(&req
->queue
);
1622 submit_next_request(ep
);
1623 request_complete(ep
, req
, 0);
1627 static irqreturn_t
usba_udc_irq(int irq
, void *devid
)
1629 struct usba_udc
*udc
= devid
;
1630 u32 status
, int_enb
;
1634 spin_lock(&udc
->lock
);
1636 int_enb
= usba_int_enb_get(udc
);
1637 status
= usba_readl(udc
, INT_STA
) & int_enb
;
1638 DBG(DBG_INT
, "irq, status=%#08x\n", status
);
1640 if (status
& USBA_DET_SUSPEND
) {
1641 toggle_bias(udc
, 0);
1642 usba_writel(udc
, INT_CLR
, USBA_DET_SUSPEND
);
1643 usba_int_enb_set(udc
, int_enb
| USBA_WAKE_UP
);
1644 udc
->bias_pulse_needed
= true;
1645 DBG(DBG_BUS
, "Suspend detected\n");
1646 if (udc
->gadget
.speed
!= USB_SPEED_UNKNOWN
1647 && udc
->driver
&& udc
->driver
->suspend
) {
1648 spin_unlock(&udc
->lock
);
1649 udc
->driver
->suspend(&udc
->gadget
);
1650 spin_lock(&udc
->lock
);
1654 if (status
& USBA_WAKE_UP
) {
1655 toggle_bias(udc
, 1);
1656 usba_writel(udc
, INT_CLR
, USBA_WAKE_UP
);
1657 usba_int_enb_set(udc
, int_enb
& ~USBA_WAKE_UP
);
1658 DBG(DBG_BUS
, "Wake Up CPU detected\n");
1661 if (status
& USBA_END_OF_RESUME
) {
1662 usba_writel(udc
, INT_CLR
, USBA_END_OF_RESUME
);
1663 generate_bias_pulse(udc
);
1664 DBG(DBG_BUS
, "Resume detected\n");
1665 if (udc
->gadget
.speed
!= USB_SPEED_UNKNOWN
1666 && udc
->driver
&& udc
->driver
->resume
) {
1667 spin_unlock(&udc
->lock
);
1668 udc
->driver
->resume(&udc
->gadget
);
1669 spin_lock(&udc
->lock
);
1673 dma_status
= USBA_BFEXT(DMA_INT
, status
);
1677 for (i
= 1; i
<= USBA_NR_DMAS
; i
++)
1678 if (dma_status
& (1 << i
))
1679 usba_dma_irq(udc
, &udc
->usba_ep
[i
]);
1682 ep_status
= USBA_BFEXT(EPT_INT
, status
);
1686 for (i
= 0; i
< udc
->num_ep
; i
++)
1687 if (ep_status
& (1 << i
)) {
1688 if (ep_is_control(&udc
->usba_ep
[i
]))
1689 usba_control_irq(udc
, &udc
->usba_ep
[i
]);
1691 usba_ep_irq(udc
, &udc
->usba_ep
[i
]);
1695 if (status
& USBA_END_OF_RESET
) {
1696 struct usba_ep
*ep0
;
1698 usba_writel(udc
, INT_CLR
, USBA_END_OF_RESET
);
1699 generate_bias_pulse(udc
);
1700 reset_all_endpoints(udc
);
1702 if (udc
->gadget
.speed
!= USB_SPEED_UNKNOWN
&& udc
->driver
) {
1703 udc
->gadget
.speed
= USB_SPEED_UNKNOWN
;
1704 spin_unlock(&udc
->lock
);
1705 usb_gadget_udc_reset(&udc
->gadget
, udc
->driver
);
1706 spin_lock(&udc
->lock
);
1709 if (status
& USBA_HIGH_SPEED
)
1710 udc
->gadget
.speed
= USB_SPEED_HIGH
;
1712 udc
->gadget
.speed
= USB_SPEED_FULL
;
1713 DBG(DBG_BUS
, "%s bus reset detected\n",
1714 usb_speed_string(udc
->gadget
.speed
));
1716 ep0
= &udc
->usba_ep
[0];
1717 ep0
->ep
.desc
= &usba_ep0_desc
;
1718 ep0
->state
= WAIT_FOR_SETUP
;
1719 usba_ep_writel(ep0
, CFG
,
1720 (USBA_BF(EPT_SIZE
, EP0_EPT_SIZE
)
1721 | USBA_BF(EPT_TYPE
, USBA_EPT_TYPE_CONTROL
)
1722 | USBA_BF(BK_NUMBER
, USBA_BK_NUMBER_ONE
)));
1723 usba_ep_writel(ep0
, CTL_ENB
,
1724 USBA_EPT_ENABLE
| USBA_RX_SETUP
);
1725 usba_int_enb_set(udc
, int_enb
| USBA_BF(EPT_INT
, 1) |
1726 USBA_DET_SUSPEND
| USBA_END_OF_RESUME
);
1729 * Unclear why we hit this irregularly, e.g. in usbtest,
1730 * but it's clearly harmless...
1732 if (!(usba_ep_readl(ep0
, CFG
) & USBA_EPT_MAPPED
))
1733 dev_dbg(&udc
->pdev
->dev
,
1734 "ODD: EP0 configuration is invalid!\n");
1737 spin_unlock(&udc
->lock
);
1742 static int start_clock(struct usba_udc
*udc
)
1749 ret
= clk_prepare_enable(udc
->pclk
);
1752 ret
= clk_prepare_enable(udc
->hclk
);
1754 clk_disable_unprepare(udc
->pclk
);
1758 udc
->clocked
= true;
1762 static void stop_clock(struct usba_udc
*udc
)
1767 clk_disable_unprepare(udc
->hclk
);
1768 clk_disable_unprepare(udc
->pclk
);
1770 udc
->clocked
= false;
1773 static int usba_start(struct usba_udc
*udc
)
1775 unsigned long flags
;
1778 ret
= start_clock(udc
);
1782 spin_lock_irqsave(&udc
->lock
, flags
);
1783 toggle_bias(udc
, 1);
1784 usba_writel(udc
, CTRL
, USBA_ENABLE_MASK
);
1785 usba_int_enb_set(udc
, USBA_END_OF_RESET
);
1786 spin_unlock_irqrestore(&udc
->lock
, flags
);
1791 static void usba_stop(struct usba_udc
*udc
)
1793 unsigned long flags
;
1795 spin_lock_irqsave(&udc
->lock
, flags
);
1796 udc
->gadget
.speed
= USB_SPEED_UNKNOWN
;
1797 reset_all_endpoints(udc
);
1799 /* This will also disable the DP pullup */
1800 toggle_bias(udc
, 0);
1801 usba_writel(udc
, CTRL
, USBA_DISABLE_MASK
);
1802 spin_unlock_irqrestore(&udc
->lock
, flags
);
1807 static irqreturn_t
usba_vbus_irq_thread(int irq
, void *devid
)
1809 struct usba_udc
*udc
= devid
;
1815 mutex_lock(&udc
->vbus_mutex
);
1817 vbus
= vbus_is_present(udc
);
1818 if (vbus
!= udc
->vbus_prev
) {
1824 if (udc
->driver
->disconnect
)
1825 udc
->driver
->disconnect(&udc
->gadget
);
1827 udc
->vbus_prev
= vbus
;
1830 mutex_unlock(&udc
->vbus_mutex
);
1834 static int atmel_usba_start(struct usb_gadget
*gadget
,
1835 struct usb_gadget_driver
*driver
)
1838 struct usba_udc
*udc
= container_of(gadget
, struct usba_udc
, gadget
);
1839 unsigned long flags
;
1841 spin_lock_irqsave(&udc
->lock
, flags
);
1842 udc
->devstatus
= 1 << USB_DEVICE_SELF_POWERED
;
1843 udc
->driver
= driver
;
1844 spin_unlock_irqrestore(&udc
->lock
, flags
);
1846 mutex_lock(&udc
->vbus_mutex
);
1848 if (gpio_is_valid(udc
->vbus_pin
))
1849 enable_irq(gpio_to_irq(udc
->vbus_pin
));
1851 /* If Vbus is present, enable the controller and wait for reset */
1852 udc
->vbus_prev
= vbus_is_present(udc
);
1853 if (udc
->vbus_prev
) {
1854 ret
= usba_start(udc
);
1859 mutex_unlock(&udc
->vbus_mutex
);
1863 if (gpio_is_valid(udc
->vbus_pin
))
1864 disable_irq(gpio_to_irq(udc
->vbus_pin
));
1866 mutex_unlock(&udc
->vbus_mutex
);
1868 spin_lock_irqsave(&udc
->lock
, flags
);
1869 udc
->devstatus
&= ~(1 << USB_DEVICE_SELF_POWERED
);
1871 spin_unlock_irqrestore(&udc
->lock
, flags
);
1875 static int atmel_usba_stop(struct usb_gadget
*gadget
)
1877 struct usba_udc
*udc
= container_of(gadget
, struct usba_udc
, gadget
);
1879 if (gpio_is_valid(udc
->vbus_pin
))
1880 disable_irq(gpio_to_irq(udc
->vbus_pin
));
1890 static void at91sam9rl_toggle_bias(struct usba_udc
*udc
, int is_on
)
1892 unsigned int uckr
= at91_pmc_read(AT91_CKGR_UCKR
);
1895 at91_pmc_write(AT91_CKGR_UCKR
, uckr
| AT91_PMC_BIASEN
);
1897 at91_pmc_write(AT91_CKGR_UCKR
, uckr
& ~(AT91_PMC_BIASEN
));
1900 static void at91sam9g45_pulse_bias(struct usba_udc
*udc
)
1902 unsigned int uckr
= at91_pmc_read(AT91_CKGR_UCKR
);
1904 at91_pmc_write(AT91_CKGR_UCKR
, uckr
& ~(AT91_PMC_BIASEN
));
1905 at91_pmc_write(AT91_CKGR_UCKR
, uckr
| AT91_PMC_BIASEN
);
1908 static const struct usba_udc_errata at91sam9rl_errata
= {
1909 .toggle_bias
= at91sam9rl_toggle_bias
,
1912 static const struct usba_udc_errata at91sam9g45_errata
= {
1913 .pulse_bias
= at91sam9g45_pulse_bias
,
1916 static const struct of_device_id atmel_udc_dt_ids
[] = {
1917 { .compatible
= "atmel,at91sam9rl-udc", .data
= &at91sam9rl_errata
},
1918 { .compatible
= "atmel,at91sam9g45-udc", .data
= &at91sam9g45_errata
},
1919 { .compatible
= "atmel,sama5d3-udc" },
1923 MODULE_DEVICE_TABLE(of
, atmel_udc_dt_ids
);
1925 static struct usba_ep
* atmel_udc_of_init(struct platform_device
*pdev
,
1926 struct usba_udc
*udc
)
1930 enum of_gpio_flags flags
;
1931 struct device_node
*np
= pdev
->dev
.of_node
;
1932 const struct of_device_id
*match
;
1933 struct device_node
*pp
;
1935 struct usba_ep
*eps
, *ep
;
1937 match
= of_match_node(atmel_udc_dt_ids
, np
);
1939 return ERR_PTR(-EINVAL
);
1941 udc
->errata
= match
->data
;
1945 udc
->vbus_pin
= of_get_named_gpio_flags(np
, "atmel,vbus-gpio", 0,
1947 udc
->vbus_pin_inverted
= (flags
& OF_GPIO_ACTIVE_LOW
) ? 1 : 0;
1950 while ((pp
= of_get_next_child(np
, pp
)))
1953 eps
= devm_kzalloc(&pdev
->dev
, sizeof(struct usba_ep
) * udc
->num_ep
,
1956 return ERR_PTR(-ENOMEM
);
1958 udc
->gadget
.ep0
= &eps
[0].ep
;
1960 INIT_LIST_HEAD(&eps
[0].ep
.ep_list
);
1964 while ((pp
= of_get_next_child(np
, pp
))) {
1967 ret
= of_property_read_u32(pp
, "reg", &val
);
1969 dev_err(&pdev
->dev
, "of_probe: reg error(%d)\n", ret
);
1974 ret
= of_property_read_u32(pp
, "atmel,fifo-size", &val
);
1976 dev_err(&pdev
->dev
, "of_probe: fifo-size error(%d)\n", ret
);
1979 ep
->fifo_size
= val
;
1981 ret
= of_property_read_u32(pp
, "atmel,nb-banks", &val
);
1983 dev_err(&pdev
->dev
, "of_probe: nb-banks error(%d)\n", ret
);
1988 ep
->can_dma
= of_property_read_bool(pp
, "atmel,can-dma");
1989 ep
->can_isoc
= of_property_read_bool(pp
, "atmel,can-isoc");
1991 ret
= of_property_read_string(pp
, "name", &name
);
1994 ep
->ep_regs
= udc
->regs
+ USBA_EPT_BASE(i
);
1995 ep
->dma_regs
= udc
->regs
+ USBA_DMA_BASE(i
);
1996 ep
->fifo
= udc
->fifo
+ USBA_FIFO_BASE(i
);
1997 ep
->ep
.ops
= &usba_ep_ops
;
1998 usb_ep_set_maxpacket_limit(&ep
->ep
, ep
->fifo_size
);
2000 INIT_LIST_HEAD(&ep
->queue
);
2003 list_add_tail(&ep
->ep
.ep_list
, &udc
->gadget
.ep_list
);
2009 dev_err(&pdev
->dev
, "of_probe: no endpoint specified\n");
2016 return ERR_PTR(ret
);
2019 static struct usba_ep
* atmel_udc_of_init(struct platform_device
*pdev
,
2020 struct usba_udc
*udc
)
2022 return ERR_PTR(-ENOSYS
);
2026 static struct usba_ep
* usba_udc_pdata(struct platform_device
*pdev
,
2027 struct usba_udc
*udc
)
2029 struct usba_platform_data
*pdata
= dev_get_platdata(&pdev
->dev
);
2030 struct usba_ep
*eps
;
2034 return ERR_PTR(-ENXIO
);
2036 eps
= devm_kzalloc(&pdev
->dev
, sizeof(struct usba_ep
) * pdata
->num_ep
,
2039 return ERR_PTR(-ENOMEM
);
2041 udc
->gadget
.ep0
= &eps
[0].ep
;
2043 udc
->vbus_pin
= pdata
->vbus_pin
;
2044 udc
->vbus_pin_inverted
= pdata
->vbus_pin_inverted
;
2045 udc
->num_ep
= pdata
->num_ep
;
2047 INIT_LIST_HEAD(&eps
[0].ep
.ep_list
);
2049 for (i
= 0; i
< pdata
->num_ep
; i
++) {
2050 struct usba_ep
*ep
= &eps
[i
];
2052 ep
->ep_regs
= udc
->regs
+ USBA_EPT_BASE(i
);
2053 ep
->dma_regs
= udc
->regs
+ USBA_DMA_BASE(i
);
2054 ep
->fifo
= udc
->fifo
+ USBA_FIFO_BASE(i
);
2055 ep
->ep
.ops
= &usba_ep_ops
;
2056 ep
->ep
.name
= pdata
->ep
[i
].name
;
2057 ep
->fifo_size
= pdata
->ep
[i
].fifo_size
;
2058 usb_ep_set_maxpacket_limit(&ep
->ep
, ep
->fifo_size
);
2060 INIT_LIST_HEAD(&ep
->queue
);
2061 ep
->nr_banks
= pdata
->ep
[i
].nr_banks
;
2062 ep
->index
= pdata
->ep
[i
].index
;
2063 ep
->can_dma
= pdata
->ep
[i
].can_dma
;
2064 ep
->can_isoc
= pdata
->ep
[i
].can_isoc
;
2067 list_add_tail(&ep
->ep
.ep_list
, &udc
->gadget
.ep_list
);
2073 static int usba_udc_probe(struct platform_device
*pdev
)
2075 struct resource
*regs
, *fifo
;
2076 struct clk
*pclk
, *hclk
;
2077 struct usba_udc
*udc
;
2080 udc
= devm_kzalloc(&pdev
->dev
, sizeof(*udc
), GFP_KERNEL
);
2084 udc
->gadget
= usba_gadget_template
;
2085 INIT_LIST_HEAD(&udc
->gadget
.ep_list
);
2087 regs
= platform_get_resource(pdev
, IORESOURCE_MEM
, CTRL_IOMEM_ID
);
2088 fifo
= platform_get_resource(pdev
, IORESOURCE_MEM
, FIFO_IOMEM_ID
);
2092 irq
= platform_get_irq(pdev
, 0);
2096 pclk
= devm_clk_get(&pdev
->dev
, "pclk");
2098 return PTR_ERR(pclk
);
2099 hclk
= devm_clk_get(&pdev
->dev
, "hclk");
2101 return PTR_ERR(hclk
);
2103 spin_lock_init(&udc
->lock
);
2104 mutex_init(&udc
->vbus_mutex
);
2108 udc
->vbus_pin
= -ENODEV
;
2111 udc
->regs
= devm_ioremap(&pdev
->dev
, regs
->start
, resource_size(regs
));
2113 dev_err(&pdev
->dev
, "Unable to map I/O memory, aborting.\n");
2116 dev_info(&pdev
->dev
, "MMIO registers at 0x%08lx mapped at %p\n",
2117 (unsigned long)regs
->start
, udc
->regs
);
2118 udc
->fifo
= devm_ioremap(&pdev
->dev
, fifo
->start
, resource_size(fifo
));
2120 dev_err(&pdev
->dev
, "Unable to map FIFO, aborting.\n");
2123 dev_info(&pdev
->dev
, "FIFO at 0x%08lx mapped at %p\n",
2124 (unsigned long)fifo
->start
, udc
->fifo
);
2126 platform_set_drvdata(pdev
, udc
);
2128 /* Make sure we start from a clean slate */
2129 ret
= clk_prepare_enable(pclk
);
2131 dev_err(&pdev
->dev
, "Unable to enable pclk, aborting.\n");
2135 usba_writel(udc
, CTRL
, USBA_DISABLE_MASK
);
2136 clk_disable_unprepare(pclk
);
2138 if (pdev
->dev
.of_node
)
2139 udc
->usba_ep
= atmel_udc_of_init(pdev
, udc
);
2141 udc
->usba_ep
= usba_udc_pdata(pdev
, udc
);
2143 toggle_bias(udc
, 0);
2145 if (IS_ERR(udc
->usba_ep
))
2146 return PTR_ERR(udc
->usba_ep
);
2148 ret
= devm_request_irq(&pdev
->dev
, irq
, usba_udc_irq
, 0,
2149 "atmel_usba_udc", udc
);
2151 dev_err(&pdev
->dev
, "Cannot request irq %d (error %d)\n",
2157 if (gpio_is_valid(udc
->vbus_pin
)) {
2158 if (!devm_gpio_request(&pdev
->dev
, udc
->vbus_pin
, "atmel_usba_udc")) {
2159 irq_set_status_flags(gpio_to_irq(udc
->vbus_pin
),
2161 ret
= devm_request_threaded_irq(&pdev
->dev
,
2162 gpio_to_irq(udc
->vbus_pin
), NULL
,
2163 usba_vbus_irq_thread
, IRQF_ONESHOT
,
2164 "atmel_usba_udc", udc
);
2166 udc
->vbus_pin
= -ENODEV
;
2167 dev_warn(&udc
->pdev
->dev
,
2168 "failed to request vbus irq; "
2169 "assuming always on\n");
2172 /* gpio_request fail so use -EINVAL for gpio_is_valid */
2173 udc
->vbus_pin
= -EINVAL
;
2177 ret
= usb_add_gadget_udc(&pdev
->dev
, &udc
->gadget
);
2180 device_init_wakeup(&pdev
->dev
, 1);
2182 usba_init_debugfs(udc
);
2183 for (i
= 1; i
< udc
->num_ep
; i
++)
2184 usba_ep_init_debugfs(udc
, &udc
->usba_ep
[i
]);
2189 static int usba_udc_remove(struct platform_device
*pdev
)
2191 struct usba_udc
*udc
;
2194 udc
= platform_get_drvdata(pdev
);
2196 device_init_wakeup(&pdev
->dev
, 0);
2197 usb_del_gadget_udc(&udc
->gadget
);
2199 for (i
= 1; i
< udc
->num_ep
; i
++)
2200 usba_ep_cleanup_debugfs(&udc
->usba_ep
[i
]);
2201 usba_cleanup_debugfs(udc
);
2207 static int usba_udc_suspend(struct device
*dev
)
2209 struct usba_udc
*udc
= dev_get_drvdata(dev
);
2215 mutex_lock(&udc
->vbus_mutex
);
2217 if (!device_may_wakeup(dev
)) {
2223 * Device may wake up. We stay clocked if we failed
2224 * to request vbus irq, assuming always on.
2226 if (gpio_is_valid(udc
->vbus_pin
)) {
2228 enable_irq_wake(gpio_to_irq(udc
->vbus_pin
));
2232 mutex_unlock(&udc
->vbus_mutex
);
2236 static int usba_udc_resume(struct device
*dev
)
2238 struct usba_udc
*udc
= dev_get_drvdata(dev
);
2244 if (device_may_wakeup(dev
) && gpio_is_valid(udc
->vbus_pin
))
2245 disable_irq_wake(gpio_to_irq(udc
->vbus_pin
));
2247 /* If Vbus is present, enable the controller and wait for reset */
2248 mutex_lock(&udc
->vbus_mutex
);
2249 udc
->vbus_prev
= vbus_is_present(udc
);
2252 mutex_unlock(&udc
->vbus_mutex
);
2258 static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops
, usba_udc_suspend
, usba_udc_resume
);
2260 static struct platform_driver udc_driver
= {
2261 .remove
= usba_udc_remove
,
2263 .name
= "atmel_usba_udc",
2264 .pm
= &usba_udc_pm_ops
,
2265 .of_match_table
= of_match_ptr(atmel_udc_dt_ids
),
2269 module_platform_driver_probe(udc_driver
, usba_udc_probe
);
2271 MODULE_DESCRIPTION("Atmel USBA UDC driver");
2272 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2273 MODULE_LICENSE("GPL");
2274 MODULE_ALIAS("platform:atmel_usba_udc");