// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 */

/*
 * The i.MX21 USB hardware contains:
 * * 32 transfer descriptors (called ETDs)
 * * 4Kb of Data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 * * Filling in the ETD with appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Getting an interrupt when done.
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and Data memory) situations are handled differently for
 * isochronous and non isochronous transactions :
 *
 * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and Data memory during URB submission
 * (and fail if unavailable).
 */
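/*
 * Illustrative sketch (not part of the original driver): for a non ISOC
 * transfer the resource handling described above boils down to this
 * sequence, using the real helpers defined later in this file:
 *
 *	etd_num = alloc_etd(imx21);
 *	if (etd_num < 0)
 *		queue the endpoint on imx21->queue_for_etd and return;
 *	dmem_offset = alloc_dmem(imx21, size, ep);
 *	if (dmem_offset < 0)
 *		queue the ETD on imx21->queue_for_dmem and return;
 *	activate_etd(imx21, etd_num, dir);
 *
 * ep_idle() and free_dmem() later hand freed resources to whatever is
 * waiting on those queues.
 */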
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "imx21-hcd.h"
#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif
static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}
/* =========================================== */
/* Hardware access helpers */
/* =========================================== */
static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}
static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}
static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	/* toggle bits are cleared by writing a 1 back to them */
	if (readl(reg) & mask)
		writel(mask, reg);
}
static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}
static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}
static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after */
	return (s16)((s16)after - (s16)frame) < 0;
}
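/*
 * Worked example (illustrative only): frame numbers wrap at 16 bits, so
 * frame_after(0x0010, 0xFFF0) evaluates (s16)((s16)0xFFF0 - (s16)0x0010)
 * = -16 - 16 = -32 < 0 and returns true: frame 0x0010 counts as "after"
 * 0xFFF0 across the wrap even though it is numerically smaller.
 */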
static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}
static inline bool unsuitable_for_dma(dma_addr_t addr)
{
	return (addr & 3) != 0;
}
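/*
 * Example (illustrative only): the hardware's DMA engine needs 4 byte
 * aligned buffers, so unsuitable_for_dma(0x12345678) is false while
 * unsuitable_for_dma(0x12345679) is true; unaligned transfers fall back
 * to PIO or a bounce buffer in activate_etd() below.
 */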
#include "imx21-dbg.c"
static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
/* =========================================== */
/* ETD management */
/* =========================================== */
static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}
static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}
static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
	etd->bounce_buffer = NULL;
}
static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}
static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe) << DW0_ADDRESS) |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
			1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}
/*
 * Copy buffer to data controller data memory.
 * We cannot use memcpy_toio() because the hardware requires 32bit writes
 */
static void copy_to_dmem(
	struct imx21 *imx21, int dmem_offset, void *src, int count)
{
	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
	u32 word = 0;
	u8 *p = src;
	int byte = 0;
	int i;

	for (i = 0; i < count; i++) {
		byte = i % 4;
		word += (*p++ << (byte * 8));
		if (byte == 3) {
			writel(word, dmem);
			dmem += 4;
			word = 0;
		}
	}

	if (count && byte != 3)
		writel(word, dmem);
}
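/*
 * Worked example (illustrative only): copying the 6 bytes 11 22 33 44 55 66
 * with copy_to_dmem() issues two 32 bit writes: 0x44332211 once byte == 3,
 * then 0x00006655 for the partial tail (count != 0 and byte != 3 after the
 * loop, so the accumulated word is flushed by the final writel).
 */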
static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
		/* For non aligned isoc the condition below is always true */
		if (etd->len <= etd->dmem_size) {
			/* Fits into data memory, use PIO */
			if (dir != TD_DIR_IN) {
				copy_to_dmem(imx21,
						etd->dmem_offset,
						etd->cpu_buffer, etd->len);
			}
			etd->dma_handle = 0;

		} else {
			/* Too big for data memory, use bounce buffer */
			enum dma_data_direction dmadir;

			if (dir == TD_DIR_IN) {
				dmadir = DMA_FROM_DEVICE;
				etd->bounce_buffer = kmalloc(etd->len,
								GFP_ATOMIC);
			} else {
				dmadir = DMA_TO_DEVICE;
				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
								etd->len,
								GFP_ATOMIC);
			}
			if (!etd->bounce_buffer) {
				dev_err(imx21->dev, "failed bounce alloc\n");
				goto err_bounce_alloc;
			}

			etd->dma_handle =
				dma_map_single(imx21->dev,
						etd->bounce_buffer,
						etd->len,
						dmadir);
			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
				dev_err(imx21->dev, "failed bounce map\n");
				goto err_bounce_map;
			}
		}
	}

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (etd->dma_handle) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP and PIO */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;
		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
	return;

err_bounce_map:
	kfree(etd->bounce_buffer);

err_bounce_alloc:
	free_dmem(imx21, etd);
	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}
/* =========================================== */
/* Data memory management */
/* =========================================== */
static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

	/* first fit: find the first gap in the sorted area list that fits */
	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -ENOMEM;
}
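/*
 * Worked example (illustrative only): with areas {offset 0, size 64} and
 * {offset 64, size 128} already on dmem_list, alloc_dmem(imx21, 30, ep)
 * rounds the size up to 32, walks past both areas (offset becomes 192),
 * and since 192 + 32 <= DMEM_SIZE appends the new area at offset 192.
 */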
/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
	    ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	etd->dmem_offset = dmem_offset;
	urb_priv->active = 1;
	activate_etd(imx21, etd_num, dir);
}
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
	struct imx21_dmem_area *area;
	struct etd_priv *tmp;
	int found = 0;
	int offset;

	if (!etd->dmem_size)
		return;
	etd->dmem_size = 0;

	offset = etd->dmem_offset;
	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found) {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}
static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}
/* =========================================== */
/* End handling */
/* =========================================== */
/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		int etd_num = ep_priv->etd[i];
		struct etd_priv *etd;
		if (etd_num < 0)
			continue;

		etd = &imx21->etd[etd_num];
		ep_priv->etd[i] = -1;

		free_dmem(imx21, etd); /* for isoc */

		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}
static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb->hcpriv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}
static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status)
{
	struct usb_host_endpoint *ep = etd->ep;

	urb_done(imx21->hcd, etd->urb, status);
	etd->urb = NULL;

	if (!list_empty(&ep->urb_list)) {
		struct urb *urb = list_first_entry(
					&ep->urb_list, struct urb, urb_list);

		dev_vdbg(imx21->dev, "next URB %p\n", urb);
		schedule_nonisoc_etd(imx21, urb);
	}
}
/* =========================================== */
/* ISOC Handling ... */
/* =========================================== */
static void schedule_isoc_etds(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = ep->hcpriv;
	struct etd_priv *etd;
	struct urb_priv *urb_priv;
	struct td *td;
	int etd_num;
	int i;
	int cur_frame;
	u8 dir;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
		if (list_empty(&ep_priv->td_list))
			break;

		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			break;

		etd = &imx21->etd[etd_num];
		if (etd->urb)
			continue;

		td = list_entry(ep_priv->td_list.next, struct td, list);
		list_del(&td->list);
		urb_priv = td->urb->hcpriv;

		cur_frame = imx21_hc_get_frame(hcd);
		if (frame_after(cur_frame, td->frame)) {
			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
				cur_frame, td->frame);
			urb_priv->isoc_status = -EXDEV;
			td->urb->iso_frame_desc[
				td->isoc_index].actual_length = 0;
			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
			if (--urb_priv->isoc_remaining == 0)
				urb_done(hcd, td->urb, urb_priv->isoc_status);
			goto too_late;
		}

		urb_priv->active = 1;
		etd->td = td;
		etd->ep = td->ep;
		etd->urb = td->urb;
		etd->len = td->len;
		etd->dma_handle = td->dma_handle;
		etd->cpu_buffer = td->cpu_buffer;

		debug_isoc_submitted(imx21, cur_frame, td);

		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
		etd_writel(imx21, etd_num, 2,
			(TD_NOTACCESSED << DW2_COMPCODE) |
			((td->frame & 0xFFFF) << DW2_STARTFRM));
		etd_writel(imx21, etd_num, 3,
			(TD_NOTACCESSED << DW3_COMPCODE0) |
			(td->len << DW3_PKTLEN0));

		activate_etd(imx21, etd_num, dir);
	}
}
static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int etd_mask = 1 << etd_num;
	struct etd_priv *etd = imx21->etd + etd_num;
	struct urb *urb = etd->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td = etd->td;
	struct usb_host_endpoint *ep = etd->ep;
	int isoc_index = td->isoc_index;
	unsigned int pipe = urb->pipe;
	int dir_in = usb_pipein(pipe);
	int cc;
	int bytes_xfrd;

	disactivate_etd(imx21, etd_num);

	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

	/* Input doesn't always fill the buffer, don't generate an error
	 * when this happens.
	 */
	if (dir_in && (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc == TD_NOTACCESSED)
		bytes_xfrd = 0;

	debug_isoc_completed(imx21,
		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
	if (cc) {
		urb_priv->isoc_status = -EXDEV;
		dev_dbg(imx21->dev,
			"bad iso cc=0x%X frame=%d sched frame=%d "
			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
			cc, imx21_hc_get_frame(hcd), td->frame,
			bytes_xfrd, td->len, urb, etd_num, isoc_index);
	}

	if (dir_in) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		if (!etd->dma_handle)
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
	}

	urb->actual_length += bytes_xfrd;
	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

	etd->td = NULL;
	etd->urb = NULL;
	etd->ep = NULL;

	if (--urb_priv->isoc_remaining == 0)
		urb_done(hcd, urb, urb_priv->isoc_status);

	schedule_isoc_etds(hcd, ep);
}
static struct ep_priv *alloc_isoc_ep(
	struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct ep_priv *ep_priv;
	int i;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; i++)
		ep_priv->etd[i] = -1;

	INIT_LIST_HEAD(&ep_priv->td_list);
	ep_priv->ep = ep;
	ep->hcpriv = ep_priv;
	return ep_priv;
}
static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i, j;
	int etd_num;

	/* Allocate the ETDs if required */
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		if (ep_priv->etd[i] < 0) {
			etd_num = alloc_etd(imx21);
			if (etd_num < 0)
				goto alloc_etd_failed;

			ep_priv->etd[i] = etd_num;
			imx21->etd[etd_num].ep = ep_priv->ep;
		}
	}
	return 0;

alloc_etd_failed:
	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
	for (j = 0; j < i; j++) {
		free_etd(imx21, ep_priv->etd[j]);
		ep_priv->etd[j] = -1;
	}
	return -ENOMEM;
}
static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
				     struct usb_host_endpoint *ep,
				     struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct urb_priv *urb_priv;
	unsigned long flags;
	struct ep_priv *ep_priv;
	struct td *td = NULL;
	int i;
	int ret;
	int cur_frame;
	u16 maxpacket;

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (urb_priv == NULL)
		return -ENOMEM;

	urb_priv->isoc_td = kcalloc(urb->number_of_packets, sizeof(struct td),
				    mem_flags);
	if (urb_priv->isoc_td == NULL) {
		ret = -ENOMEM;
		goto alloc_td_failed;
	}

	spin_lock_irqsave(&imx21->lock, flags);

	if (ep->hcpriv == NULL) {
		ep_priv = alloc_isoc_ep(imx21, ep);
		if (ep_priv == NULL) {
			ret = -ENOMEM;
			goto alloc_ep_failed;
		}
	} else {
		ep_priv = ep->hcpriv;
	}

	ret = alloc_isoc_etds(imx21, ep_priv);
	if (ret)
		goto alloc_etd_failed;

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto link_failed;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	/* allocate data memory for largest packets if not already done */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
			/* not sure if this can really occur.... */
			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
				etd->dmem_size, maxpacket);
			ret = -EMSGSIZE;
			goto alloc_dmem_failed;
		}

		if (etd->dmem_size == 0) {
			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
			if (etd->dmem_offset < 0) {
				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
				ret = -EAGAIN;
				goto alloc_dmem_failed;
			}
			etd->dmem_size = maxpacket;
		}
	}

	/* calculate frame */
	cur_frame = imx21_hc_get_frame(hcd);
	i = 0;
	if (list_empty(&ep_priv->td_list)) {
		urb->start_frame = wrap_frame(cur_frame + 5);
	} else {
		urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
				struct td, list)->frame + urb->interval);

		if (frame_after(cur_frame, urb->start_frame)) {
			dev_dbg(imx21->dev,
				"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
				urb->start_frame, cur_frame,
				(urb->transfer_flags & URB_ISO_ASAP) != 0);
			i = DIV_ROUND_UP(wrap_frame(
					cur_frame - urb->start_frame),
					urb->interval);

			/* Treat underruns as if URB_ISO_ASAP was set */
			if ((urb->transfer_flags & URB_ISO_ASAP) ||
					i >= urb->number_of_packets) {
				urb->start_frame = wrap_frame(urb->start_frame
						+ i * urb->interval);
				i = 0;
			}
		}
	}

	/* set up transfers */
	urb_priv->isoc_remaining = urb->number_of_packets - i;
	td = urb_priv->isoc_td;
	for (; i < urb->number_of_packets; i++, td++) {
		unsigned int offset = urb->iso_frame_desc[i].offset;
		td->ep = ep;
		td->urb = urb;
		td->len = urb->iso_frame_desc[i].length;
		td->isoc_index = i;
		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
		td->dma_handle = urb->transfer_dma + offset;
		td->cpu_buffer = urb->transfer_buffer + offset;
		list_add_tail(&td->list, &ep_priv->td_list);
	}

	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
		urb->number_of_packets, urb->start_frame, td->frame);

	debug_urb_submitted(imx21, urb);
	schedule_isoc_etds(hcd, ep);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

alloc_dmem_failed:
	usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_etd_failed:
alloc_ep_failed:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv->isoc_td);

alloc_td_failed:
	kfree(urb_priv);
	return ret;
}
static void dequeue_isoc_urb(struct imx21 *imx21,
	struct urb *urb, struct ep_priv *ep_priv)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td, *tmp;
	int i;

	if (urb_priv->active) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			int etd_num = ep_priv->etd[i];
			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
				struct etd_priv *etd = imx21->etd + etd_num;

				reset_etd(imx21, etd_num);
				free_dmem(imx21, etd);
			}
		}
	}

	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
		if (td->urb == urb) {
			dev_vdbg(imx21->dev, "removing td %p\n", td);
			list_del(&td->list);
		}
	}
}
/* =========================================== */
/* NON ISOC Handling ... */
/* =========================================== */
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
	int state = urb_priv->state;
	int etd_num = ep_priv->etd[0];
	struct etd_priv *etd;
	u32 count;
	u16 etd_buf_size;
	u16 maxpacket;
	u8 dir;
	u8 bufround;
	u8 datatoggle;
	u8 interval = 0;
	u8 relpolpos = 0;

	if (etd_num < 0) {
		dev_err(imx21->dev, "No valid ETD\n");
		return;
	}
	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

	etd = &imx21->etd[etd_num];
	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
	if (!maxpacket)
		maxpacket = 8;

	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
		if (state == US_CTRL_SETUP) {
			dir = TD_DIR_SETUP;
			if (unsuitable_for_dma(urb->setup_dma))
				usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
					urb);
			etd->dma_handle = urb->setup_dma;
			etd->cpu_buffer = urb->setup_packet;
			bufround = 0;
			count = 8;
			datatoggle = TD_TOGGLE_DATA0;
		} else {	/* US_CTRL_ACK */
			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
			bufround = 0;
			count = 0;
			datatoggle = TD_TOGGLE_DATA1;
		}
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
		bufround = (dir == TD_DIR_IN) ? 1 : 0;
		if (unsuitable_for_dma(urb->transfer_dma))
			usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);

		etd->dma_handle = urb->transfer_dma;
		etd->cpu_buffer = urb->transfer_buffer;
		if (usb_pipebulk(pipe) && (state == US_BULK0))
			count = 0;
		else
			count = urb->transfer_buffer_length;

		if (usb_pipecontrol(pipe)) {
			datatoggle = TD_TOGGLE_DATA1;
		} else {
			if (usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe)))
				datatoggle = TD_TOGGLE_DATA1;
			else
				datatoggle = TD_TOGGLE_DATA0;
		}
	}

	etd->urb = urb;
	etd->ep = urb_priv->ep;
	etd->len = count;

	if (usb_pipeint(pipe)) {
		interval = urb->interval;
		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
	}

	/* Write ETD to device memory */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

	etd_writel(imx21, etd_num, 2,
		((u32) interval << DW2_POLINTERV) |
		((u32) relpolpos << DW2_RELPOLPOS) |
		((u32) dir << DW2_DIRPID) |
		((u32) bufround << DW2_BUFROUND) |
		((u32) datatoggle << DW2_DATATOG) |
		((u32) TD_NOTACCESSED << DW2_COMPCODE));

	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
	   is smaller. Make sure we don't overrun the buffer!
	 */
	if (count && count < maxpacket)
		etd_buf_size = count;
	else
		etd_buf_size = maxpacket;

	etd_writel(imx21, etd_num, 3,
		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
1002 etd
->dma_handle
= 0;
1004 /* allocate x and y buffer space at once */
1005 etd
->dmem_size
= (count
> maxpacket
) ? maxpacket
* 2 : maxpacket
;
1006 etd
->dmem_offset
= alloc_dmem(imx21
, etd
->dmem_size
, urb_priv
->ep
);
1007 if (etd
->dmem_offset
< 0) {
1008 /* Setup everything we can in HW and update when we get DMEM */
1009 etd_writel(imx21
, etd_num
, 1, (u32
)maxpacket
<< 16);
1011 dev_dbg(imx21
->dev
, "Queuing etd %d for DMEM\n", etd_num
);
1012 debug_urb_queued_for_dmem(imx21
, urb
);
1013 list_add_tail(&etd
->queue
, &imx21
->queue_for_dmem
);
1017 etd_writel(imx21
, etd_num
, 1,
1018 (((u32
) etd
->dmem_offset
+ (u32
) maxpacket
) << DW1_YBUFSRTAD
) |
1019 (u32
) etd
->dmem_offset
);
1021 urb_priv
->active
= 1;
1023 /* enable the ETD to kick off transfer */
1024 dev_vdbg(imx21
->dev
, "Activating etd %d for %d bytes %s\n",
1025 etd_num
, count
, dir
!= TD_DIR_IN
? "out" : "in");
1026 activate_etd(imx21
, etd_num
, dir
);
static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct etd_priv *etd = &imx21->etd[etd_num];
	struct urb *urb = etd->urb;
	u32 etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	int dir;
	int cc;
	int bytes_xfrd;
	int etd_done;

	disactivate_etd(imx21, etd_num);

	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

	/* save toggle carry */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		      usb_pipeout(urb->pipe),
		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

	if (dir == TD_DIR_IN) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

		if (etd->bounce_buffer) {
			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
			dma_unmap_single(imx21->dev,
				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
		} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
		}
	}

	kfree(etd->bounce_buffer);
	etd->bounce_buffer = NULL;
	free_dmem(imx21, etd);

	urb->error_count = 0;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc != 0)
		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

	etd_done = (cc_to_error[cc] != 0);	/* stop if error */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		switch (urb_priv->state) {
		case US_CTRL_SETUP:
			if (urb->transfer_buffer_length > 0)
				urb_priv->state = US_CTRL_DATA;
			else
				urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_DATA:
			urb->actual_length += bytes_xfrd;
			urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_ACK:
			etd_done = 1;
			break;
		default:
			dev_err(imx21->dev,
				"Invalid pipe state %d\n", urb_priv->state);
			etd_done = 1;
			break;
		}
		break;

	case PIPE_BULK:
		urb->actual_length += bytes_xfrd;
		if ((urb_priv->state == US_BULK)
		    && (urb->transfer_flags & URB_ZERO_PACKET)
		    && urb->transfer_buffer_length > 0
		    && ((urb->transfer_buffer_length %
			 usb_maxpacket(urb->dev, urb->pipe,
				       usb_pipeout(urb->pipe))) == 0)) {
			/* need a 0-packet */
			urb_priv->state = US_BULK0;
		} else {
			etd_done = 1;
		}
		break;

	case PIPE_INTERRUPT:
		urb->actual_length += bytes_xfrd;
		etd_done = 1;
		break;
	}

	if (etd_done)
		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
	else {
		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
		schedule_nonisoc_etd(imx21, urb);
	}
}
static struct ep_priv *alloc_ep(void)
{
	int i;
	struct ep_priv *ep_priv;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; ++i)
		ep_priv->etd[i] = -1;

	return ep_priv;
}
static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;
	struct ep_priv *ep_priv;
	struct etd_priv *etd;
	int ret;
	unsigned long flags;

	dev_vdbg(imx21->dev,
		"enqueue urb=%p ep=%p len=%d "
		"buffer=%p dma=%pad setupBuf=%p setupDma=%pad\n",
		urb, ep,
		urb->transfer_buffer_length,
		urb->transfer_buffer, &urb->transfer_dma,
		urb->setup_packet, &urb->setup_dma);

	if (usb_pipeisoc(urb->pipe))
		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	spin_lock_irqsave(&imx21->lock, flags);

	ep_priv = ep->hcpriv;
	if (ep_priv == NULL) {
		ep_priv = alloc_ep();
		if (!ep_priv) {
			ret = -ENOMEM;
			goto failed_alloc_ep;
		}
		ep->hcpriv = ep_priv;
		ep_priv->ep = ep;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto failed_link;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb_priv->state = US_CTRL_SETUP;
		break;
	case PIPE_BULK:
		urb_priv->state = US_BULK;
		break;
	}

	debug_urb_submitted(imx21, urb);
	if (ep_priv->etd[0] < 0) {
		if (ep_priv->waiting_etd) {
			dev_dbg(imx21->dev,
				"no ETD available already queued %p\n",
				ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			goto out;
		}
		ep_priv->etd[0] = alloc_etd(imx21);
		if (ep_priv->etd[0] < 0) {
			dev_dbg(imx21->dev,
				"no ETD available queueing %p\n", ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
			ep_priv->waiting_etd = 1;
			goto out;
		}
	}

	/* Schedule if no URB already active for this endpoint */
	etd = &imx21->etd[ep_priv->etd[0]];
	if (etd->urb == NULL) {
		DEBUG_LOG_FRAME(imx21, etd, last_req);
		schedule_nonisoc_etd(imx21, urb);
	}

out:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

failed_link:
failed_alloc_ep:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv);
	return ret;
}
static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				int status)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct usb_host_endpoint *ep;
	struct ep_priv *ep_priv;
	struct urb_priv *urb_priv = urb->hcpriv;
	int ret = -EINVAL;

	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
		urb, usb_pipeisoc(urb->pipe), status);

	spin_lock_irqsave(&imx21->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto fail;
	ep = urb_priv->ep;
	ep_priv = ep->hcpriv;

	debug_urb_unlinked(imx21, urb);

	if (usb_pipeisoc(urb->pipe)) {
		dequeue_isoc_urb(imx21, urb, ep_priv);
		schedule_isoc_etds(hcd, ep);
	} else if (urb_priv->active) {
		int etd_num = ep_priv->etd[0];
		if (etd_num != -1) {
			struct etd_priv *etd = &imx21->etd[etd_num];

			disactivate_etd(imx21, etd_num);
			free_dmem(imx21, etd);
			etd->urb = NULL;
			kfree(etd->bounce_buffer);
			etd->bounce_buffer = NULL;
		}
	}

	urb_done(hcd, urb, status);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return ret;
}
/* =========================================== */
/* Interrupt dispatch */
/* =========================================== */
static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
	int etd_num;
	int enable_sof_int = 0;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
		u32 etd_mask = 1 << etd_num;
		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
		struct etd_priv *etd = &imx21->etd[etd_num];

		if (done) {
			DEBUG_LOG_FRAME(imx21, etd, last_int);
		} else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait)
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (eg mass storage) even just test1 will hang without
 * the kludge.
 */
			u32 dword0;
			int cc;

			if (etd->active_count && !enabled) /* suspicious... */
				enable_sof_int = 1;

			if (!sof || enabled || !etd->active_count)
				continue;

			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
			if (cc == TD_NOTACCESSED)
				continue;

			if (++etd->active_count < 10)
				continue;

			dword0 = etd_readl(imx21, etd_num, 0);
			dev_dbg(imx21->dev,
				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
				etd_num, dword0 & 0x7F,
				(dword0 >> DW0_ENDPNT) & 0x0F,
				cc);

#ifdef DEBUG
			dev_dbg(imx21->dev,
				"frame: act=%d disact=%d"
				" int=%d req=%d cur=%d\n",
				etd->activated_frame,
				etd->disactivated_frame,
				etd->last_int_frame,
				etd->last_req_frame,
				readl(imx21->regs + USBH_FRMNUB));
			imx21->debug_unblocks++;
#endif
			etd->active_count = 0;
/* End of kludge */
		}

		if (etd->ep == NULL || etd->urb == NULL) {
			dev_dbg(imx21->dev,
				"Interrupt for unexpected etd %d"
				" ep=%p urb=%p\n",
				etd_num, etd->ep, etd->urb);
			disactivate_etd(imx21, etd_num);
			continue;
		}

		if (usb_pipeisoc(etd->urb->pipe))
			isoc_etd_done(hcd, etd_num);
		else
			nonisoc_etd_done(hcd, etd_num);
	}

	/* only enable SOF interrupt if it may be needed for the kludge */
	if (enable_sof_int)
		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
	else
		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);

	spin_unlock_irqrestore(&imx21->lock, flags);
}
static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	u32 ints = readl(imx21->regs + USBH_SYSISR);

	if (ints & USBH_SYSIEN_HERRINT)
		dev_dbg(imx21->dev, "Scheduling error\n");

	if (ints & USBH_SYSIEN_SORINT)
		dev_dbg(imx21->dev, "Scheduling overrun\n");

	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

	writel(ints, imx21->regs + USBH_SYSISR);
	return IRQ_HANDLED;
}
static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
				      struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct ep_priv *ep_priv;
	int i;

	if (ep == NULL)
		return;

	spin_lock_irqsave(&imx21->lock, flags);
	ep_priv = ep->hcpriv;
	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

	if (!list_empty(&ep->urb_list))
		dev_dbg(imx21->dev, "ep's URB list is not empty\n");

	if (ep_priv != NULL) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			if (ep_priv->etd[i] > -1)
				dev_dbg(imx21->dev, "free etd %d for disable\n",
					ep_priv->etd[i]);

			free_etd(imx21, ep_priv->etd[i]);
		}
		kfree(ep_priv);
		ep->hcpriv = NULL;
	}

	for (i = 0; i < USB_NUM_ETD; i++) {
		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
			dev_err(imx21->dev,
				"Active etd %d for disabled ep=%p!\n", i, ep);
			free_etd(imx21, i);
		}
	}
	free_epdmem(imx21, ep);
	spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Hub handling */
/* =========================================== */
static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = USB_DT_HUB; /* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		HUB_CHAR_NO_LPSM |	/* No power switching */
		HUB_CHAR_NO_OCPM);	/* No over current protection */

	desc->u.hs.DeviceRemovable[0] = 1 << 1;
	desc->u.hs.DeviceRemovable[1] = ~0;
	return 0;
}
static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		/* report the out-of-range value before clamping it */
		dev_err(imx21->dev, "ports %d > 7\n", ports);
		ports = 7;
	}
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			(USBH_PORTSTAT_CONNECTSC |
			USBH_PORTSTAT_PRTENBLSC |
			USBH_PORTSTAT_PRTSTATSC |
			USBH_PORTSTAT_OVRCURIC |
			USBH_PORTSTAT_PRTRSTSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}
static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, " ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, " C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, " C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, " C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, " C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, " GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
		    wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, " OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, " LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, " SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, " POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, " RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, " unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, " unknown\n");
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}
/* =========================================== */
/* Host controller management */
/* =========================================== */
static int imx21_hc_reset(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	/* Reset the Host controller modules */
	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
		imx21->regs + USBOTG_RST_CTRL);

	/* Wait for reset to finish */
	timeout = jiffies + HZ;
	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
		if (time_after(jiffies, timeout)) {
			spin_unlock_irqrestore(&imx21->lock, flags);
			dev_err(imx21->dev, "timeout waiting for reset\n");
			return -ETIMEDOUT;
		}
		spin_unlock_irq(&imx21->lock);
		schedule_timeout_uninterruptible(1);
		spin_lock_irq(&imx21->lock);
	}
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}
static int imx21_hc_start(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	int i, j;
	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
	u32 usb_control = 0;

	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
			USBOTG_HWMODE_HOSTXCVR_MASK);
	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
			USBOTG_HWMODE_OTGXCVR_MASK);

	if (imx21->pdata->host1_txenoe)
		usb_control |= USBCTRL_HOST1_TXEN_OE;

	if (!imx21->pdata->host1_xcverless)
		usb_control |= USBCTRL_HOST1_BYP_TLL;

	if (imx21->pdata->otg_ext_xcvr)
		usb_control |= USBCTRL_OTC_RCV_RXDP;

	spin_lock_irqsave(&imx21->lock, flags);

	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
		imx21->regs + USBOTG_CLK_CTRL);
	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
	writel(usb_control, imx21->regs + USBCTRL);
	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
		imx21->regs + USB_MISCCONTROL);

	/* Clear the ETDs */
	for (i = 0; i < USB_NUM_ETD; i++)
		for (j = 0; j < 4; j++)
			etd_writel(imx21, i, j, 0);

	/* Take the HC out of reset */
	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
		imx21->regs + USBH_HOST_CTRL);

	/* Enable ports */
	if (imx21->pdata->enable_otg_host)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(0));

	if (imx21->pdata->enable_host1)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(1));

	if (imx21->pdata->enable_host2)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(2));

	hcd->state = HC_STATE_RUNNING;

	/* Enable host controller interrupts */
	set_register_bits(imx21, USBH_SYSIEN,
		USBH_SYSIEN_HERRINT |
		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
static void imx21_hc_stop(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	writel(0, imx21->regs + USBH_SYSIEN);
	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
	/* argument order is (offset, mask): clear the HST and MAIN clock
	 * enables in USBOTG_CLK_CTRL */
	clear_register_bits(imx21, USBOTG_CLK_CTRL,
		USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
	spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Driver glue */
/* =========================================== */
static const struct hc_driver imx21_hc_driver = {
	.description = hcd_name,
	.product_desc = "IMX21 USB Host Controller",
	.hcd_priv_size = sizeof(struct imx21),

	.irq = imx21_irq,

	.reset = imx21_hc_reset,
	.start = imx21_hc_start,
	.stop = imx21_hc_stop,

	/* I/O requests */
	.urb_enqueue = imx21_hc_urb_enqueue,
	.urb_dequeue = imx21_hc_urb_dequeue,
	.endpoint_disable = imx21_hc_endpoint_disable,

	/* scheduling support */
	.get_frame_number = imx21_hc_get_frame,

	/* Root hub support */
	.hub_status_data = imx21_hc_hub_status_data,
	.hub_control = imx21_hc_hub_control,
};
static struct mx21_usbh_platform_data default_pdata = {
	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.enable_host1 = 1,
	.enable_host2 = 1,
	.enable_otg_host = 1,
};
static int imx21_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	remove_debug_files(imx21);
	usb_remove_hcd(hcd);

	if (res != NULL) {
		clk_disable_unprepare(imx21->clk);
		clk_put(imx21->clk);
		iounmap(imx21->regs);
		release_mem_region(res->start, resource_size(res));
	}

	kfree(hcd);
	return 0;
}
static int imx21_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct imx21 *imx21;
	struct resource *res;
	int ret;
	int irq;

	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

	hcd = usb_create_hcd(&imx21_hc_driver,
		&pdev->dev, dev_name(&pdev->dev));
	if (hcd == NULL) {
		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
		    dev_name(&pdev->dev));
		return -ENOMEM;
	}

	imx21 = hcd_to_imx21(hcd);
	imx21->hcd = hcd;
	imx21->dev = &pdev->dev;
	imx21->pdata = dev_get_platdata(&pdev->dev);
	if (!imx21->pdata)
		imx21->pdata = &default_pdata;

	spin_lock_init(&imx21->lock);
	INIT_LIST_HEAD(&imx21->dmem_list);
	INIT_LIST_HEAD(&imx21->queue_for_etd);
	INIT_LIST_HEAD(&imx21->queue_for_dmem);
	create_debug_files(imx21);

	res = request_mem_region(res->start, resource_size(res), hcd_name);
	if (!res) {
		ret = -EBUSY;
		goto failed_request_mem;
	}

	imx21->regs = ioremap(res->start, resource_size(res));
	if (imx21->regs == NULL) {
		dev_err(imx21->dev, "Cannot map registers\n");
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* Enable clocks source */
	imx21->clk = clk_get(imx21->dev, NULL);
	if (IS_ERR(imx21->clk)) {
		dev_err(imx21->dev, "no clock found\n");
		ret = PTR_ERR(imx21->clk);
		goto failed_clock_get;
	}

	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
	if (ret)
		goto failed_clock_set;
	ret = clk_prepare_enable(imx21->clk);
	if (ret)
		goto failed_clock_enable;

	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

	ret = usb_add_hcd(hcd, irq, 0);
	if (ret != 0) {
		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
		goto failed_add_hcd;
	}
	device_wakeup_enable(hcd->self.controller);

	return 0;

failed_add_hcd:
	clk_disable_unprepare(imx21->clk);
failed_clock_enable:
failed_clock_set:
	clk_put(imx21->clk);
failed_clock_get:
	iounmap(imx21->regs);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_request_mem:
	remove_debug_files(imx21);
	usb_put_hcd(hcd);
	return ret;
}
static struct platform_driver imx21_hcd_driver = {
	.driver = {
		.name = hcd_name,
	},
	.probe = imx21_probe,
	.remove = imx21_remove,
};

module_platform_driver(imx21_hcd_driver);
MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");