/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * The i.MX21 USB hardware contains:
 *  * 32 transfer descriptors (called ETDs)
 *  * 4Kb of data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller).
 *
 * So setting up a transfer involves:
 *  * Filling in an ETD with the appropriate information
 *  * Allocating data memory (and putting the offset in the ETD)
 *  * Getting an interrupt when done.
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and data memory) situations are handled differently
 * for isochronous and non-isochronous transactions:
 *
 * Non-ISOC transfers are queued if either ETDs or data memory are
 * unavailable.
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and data memory during URB submission
 * (and fail if either is unavailable).
 */
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif
static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
	return (struct imx21 *)hcd->hcd_priv;
}
/* =========================================== */
/* Hardware access helpers			*/
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) | mask, reg);
}
static inline void clear_register_bits(struct imx21 *imx21,
	u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;
	writel(readl(reg) & ~mask, reg);
}
static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (readl(reg) & mask)
		writel(mask, reg);
}
static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
	void __iomem *reg = imx21->regs + offset;

	if (!(readl(reg) & mask))
		writel(mask, reg);
}
static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}
static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}
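
/*
 * The hardware frame counter (USBH_FRMNUB) is 16 bits wide, so frame
 * numbers below are kept modulo 0x10000 and ordered with a signed 16 bit
 * difference, in the same spirit as the jiffies time_after() helpers.
 */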
static inline int wrap_frame(int counter)
{
	return counter & 0xFFFF;
}
static inline int frame_after(int frame, int after)
{
	/* handle wrapping like jiffies time_after */
	return (s16)((s16)after - (s16)frame) < 0;
}
static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);

	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}
static inline bool unsuitable_for_dma(dma_addr_t addr)
{
	return (addr & 3) != 0;
}
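
/*
 * Buffers that are not 32 bit aligned cannot be handed to the ETD DMA
 * engine; activate_etd() falls back to PIO via the data memory, or to a
 * bounce buffer, for such transfers.
 */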
#include "imx21-dbg.c"

static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
/* =========================================== */
/* ETD management				*/
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
	int i;
	struct etd_priv *etd = imx21->etd;

	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc == 0) {
			memset(etd, 0, sizeof(imx21->etd[0]));
			etd->alloc = 1;
			debug_etd_allocated(imx21);
			return i;
		}
	}
	return -1;
}
static void disactivate_etd(struct imx21 *imx21, int num)
{
	int etd_mask = (1 << num);
	struct etd_priv *etd = &imx21->etd[num];

	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

	etd->active_count = 0;

	DEBUG_LOG_FRAME(imx21, etd, disactivated);
}
static void reset_etd(struct imx21 *imx21, int num)
{
	struct etd_priv *etd = imx21->etd + num;
	int i;

	disactivate_etd(imx21, num);

	for (i = 0; i < 4; i++)
		etd_writel(imx21, num, i, 0);
	etd->urb = NULL;
	etd->ep = NULL;
	etd->td = NULL;
	etd->bounce_buffer = NULL;
}
static void free_etd(struct imx21 *imx21, int num)
{
	if (num < 0)
		return;

	if (num >= USB_NUM_ETD) {
		dev_err(imx21->dev, "BAD etd=%d!\n", num);
		return;
	}
	if (imx21->etd[num].alloc == 0) {
		dev_err(imx21->dev, "ETD %d already free!\n", num);
		return;
	}

	debug_etd_freed(imx21);
	reset_etd(imx21, num);
	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}
static void setup_etd_dword0(struct imx21 *imx21,
	int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
	etd_writel(imx21, etd_num, 0,
		((u32) usb_pipedevice(urb->pipe) << DW0_ADDRESS) |
		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
		((u32) dir << DW0_DIRECT) |
		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
			1 : 0) << DW0_SPEED) |
		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
		((u32) maxpacket << DW0_MAXPKTSIZ));
}
/*
 * Copy buffer to data controller data memory.
 * We cannot use memcpy_toio() because the hardware requires 32bit writes.
 */
static void copy_to_dmem(
	struct imx21 *imx21, int dmem_offset, void *src, int count)
{
	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
	u32 word = 0;
	u8 *p = src;
	int byte = 0;
	int i;

	for (i = 0; i < count; i++) {
		byte = i % 4;
		word += (*p++ << (byte * 8));
		if (byte == 3) {
			writel(word, dmem);
			dmem += 4;
			word = 0;
		}
	}

	if (count && byte != 3)
		writel(word, dmem);
}
static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
	u32 etd_mask = 1 << etd_num;
	struct etd_priv *etd = &imx21->etd[etd_num];

	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
		/* For non aligned isoc the condition below is always true */
		if (etd->len <= etd->dmem_size) {
			/* Fits into data memory, use PIO */
			if (dir != TD_DIR_IN) {
				copy_to_dmem(imx21,
						etd->dmem_offset,
						etd->cpu_buffer, etd->len);
			}
			etd->dma_handle = 0;
		} else {
			/* Too big for data memory, use bounce buffer */
			enum dma_data_direction dmadir;

			if (dir == TD_DIR_IN) {
				dmadir = DMA_FROM_DEVICE;
				etd->bounce_buffer = kmalloc(etd->len,
								GFP_ATOMIC);
			} else {
				dmadir = DMA_TO_DEVICE;
				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
								etd->len,
								GFP_ATOMIC);
			}
			if (!etd->bounce_buffer) {
				dev_err(imx21->dev, "failed bounce alloc\n");
				goto err_bounce_alloc;
			}

			etd->dma_handle =
				dma_map_single(imx21->dev,
						etd->bounce_buffer,
						etd->len,
						dmadir);
			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
				dev_err(imx21->dev, "failed bounce map\n");
				goto err_bounce_map;
			}
		}
	}

	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

	if (etd->dma_handle) {
		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
	} else {
		if (dir != TD_DIR_IN) {
			/* need to set for ZLP and PIO */
			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
		}
	}

	DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
	if (!etd->active_count) {
		int i;
		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
		etd->disactivated_frame = -1;
		etd->last_int_frame = -1;
		etd->last_req_frame = -1;
		for (i = 0; i < 4; i++)
			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
	}
#endif

	etd->active_count = 1;
	writel(etd_mask, imx21->regs + USBH_ETDENSET);
	return;

err_bounce_map:
	kfree(etd->bounce_buffer);

err_bounce_alloc:
	free_dmem(imx21, etd);
	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}
/* =========================================== */
/* Data memory management			*/
/* =========================================== */
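
/*
 * Data memory is handed out first-fit from the sorted dmem_list.
 * alloc_dmem() returns a byte offset into the 4Kb data memory on success
 * or a negative value if no suitable gap exists; non-ISOC requests are
 * then parked on queue_for_dmem until free_dmem() releases some space.
 */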
static int alloc_dmem(struct imx21 *imx21, unsigned int size,
		      struct usb_host_endpoint *ep)
{
	unsigned int offset = 0;
	struct imx21_dmem_area *area;
	struct imx21_dmem_area *tmp;

	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

	if (size > DMEM_SIZE) {
		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
			size, DMEM_SIZE);
		return -EINVAL;
	}

	list_for_each_entry(tmp, &imx21->dmem_list, list) {
		if ((size + offset) < offset)
			goto fail;
		if ((size + offset) <= tmp->offset)
			break;
		offset = tmp->size + tmp->offset;
		if ((offset + size) > DMEM_SIZE)
			goto fail;
	}

	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
	if (area == NULL)
		return -ENOMEM;

	area->ep = ep;
	area->offset = offset;
	area->size = size;
	list_add_tail(&area->list, &tmp->list);
	debug_dmem_allocated(imx21, size);
	return offset;

fail:
	return -ENOMEM;
}
/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
	struct etd_priv *etd, u32 dmem_offset)
{
	struct urb_priv *urb_priv = etd->urb->hcpriv;
	int etd_num = etd - &imx21->etd[0];
	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
		etd_num);
	etd_writel(imx21, etd_num, 1,
	    ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

	etd->dmem_offset = dmem_offset;
	urb_priv->active = 1;
	activate_etd(imx21, etd_num, dir);
}
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
	struct imx21_dmem_area *area;
	struct etd_priv *tmp;
	int found = 0;
	int offset;

	if (!etd->dmem_size)
		return;
	etd->dmem_size = 0;

	offset = etd->dmem_offset;
	list_for_each_entry(area, &imx21->dmem_list, list) {
		if (area->offset == offset) {
			debug_dmem_freed(imx21, area->size);
			list_del(&area->list);
			kfree(area);
			found = 1;
			break;
		}
	}

	if (!found) {
		dev_err(imx21->dev,
			"Trying to free unallocated DMEM %d\n", offset);
		return;
	}

	/* Try again to allocate memory for anything we've queued */
	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
		if (offset >= 0) {
			list_del(&etd->queue);
			activate_queued_etd(imx21, etd, (u32)offset);
		}
	}
}
static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct imx21_dmem_area *area, *tmp;

	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
		if (area->ep == ep) {
			dev_err(imx21->dev,
				"Active DMEM %d for disabled ep=%p\n",
				area->offset, ep);
			list_del(&area->list);
			kfree(area);
		}
	}
}
/* =========================================== */
/* End point management				*/
/* =========================================== */
/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		int etd_num = ep_priv->etd[i];
		struct etd_priv *etd;
		if (etd_num < 0)
			continue;

		etd = &imx21->etd[etd_num];
		ep_priv->etd[i] = -1;

		free_dmem(imx21, etd); /* for isoc */

		if (list_empty(&imx21->queue_for_etd)) {
			free_etd(imx21, etd_num);
			continue;
		}

		dev_dbg(imx21->dev,
			"assigning idle etd %d for queued request\n", etd_num);
		ep_priv = list_first_entry(&imx21->queue_for_etd,
			struct ep_priv, queue);
		list_del(&ep_priv->queue);
		reset_etd(imx21, etd_num);
		ep_priv->waiting_etd = 0;
		ep_priv->etd[i] = etd_num;

		if (list_empty(&ep_priv->ep->urb_list)) {
			dev_err(imx21->dev, "No urb for queued ep!\n");
			continue;
		}
		schedule_nonisoc_etd(imx21, list_first_entry(
			&ep_priv->ep->urb_list, struct urb, urb_list));
	}
}
static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = urb->ep->hcpriv;
	struct urb_priv *urb_priv = urb->hcpriv;

	debug_urb_completed(imx21, urb, status);
	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

	kfree(urb_priv->isoc_td);
	kfree(urb_priv);
	urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock(&imx21->lock);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_lock(&imx21->lock);
	if (list_empty(&ep_priv->ep->urb_list))
		ep_idle(imx21, ep_priv);
}
static void nonisoc_urb_completed_for_etd(
	struct imx21 *imx21, struct etd_priv *etd, int status)
{
	struct usb_host_endpoint *ep = etd->ep;

	urb_done(imx21->hcd, etd->urb, status);
	etd->urb = NULL;

	if (!list_empty(&ep->urb_list)) {
		struct urb *urb = list_first_entry(
					&ep->urb_list, struct urb, urb_list);

		dev_vdbg(imx21->dev, "next URB %p\n", urb);
		schedule_nonisoc_etd(imx21, urb);
	}
}
/* =========================================== */
/* ISOC Handling ...				*/
/* =========================================== */
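
/*
 * Feed pending isochronous TDs to the (up to NUM_ISO_ETDS) ETDs assigned
 * to the endpoint.  TDs whose scheduled frame has already passed are
 * completed immediately with -EXDEV; the others are programmed into an
 * idle ETD and activated for their start frame.
 */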
static void schedule_isoc_etds(struct usb_hcd *hcd,
	struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct ep_priv *ep_priv = ep->hcpriv;
	struct etd_priv *etd;
	struct urb_priv *urb_priv;
	struct td *td;
	int etd_num;
	int i;
	int cur_frame;
	u8 dir;

	for (i = 0; i < NUM_ISO_ETDS; i++) {
		if (list_empty(&ep_priv->td_list))
			break;

		etd_num = ep_priv->etd[i];
		if (etd_num < 0)
			break;

		etd = &imx21->etd[etd_num];
		if (etd->urb)
			continue;

		td = list_entry(ep_priv->td_list.next, struct td, list);
		list_del(&td->list);
		urb_priv = td->urb->hcpriv;

		cur_frame = imx21_hc_get_frame(hcd);
		if (frame_after(cur_frame, td->frame)) {
			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
				cur_frame, td->frame);
			urb_priv->isoc_status = -EXDEV;
			td->urb->iso_frame_desc[
				td->isoc_index].actual_length = 0;
			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
			if (--urb_priv->isoc_remaining == 0)
				urb_done(hcd, td->urb, urb_priv->isoc_status);
			continue;
		}

		urb_priv->active = 1;
		etd->td = td;
		etd->ep = td->ep;
		etd->urb = td->urb;
		etd->len = td->len;
		etd->dma_handle = td->dma_handle;
		etd->cpu_buffer = td->cpu_buffer;

		debug_isoc_submitted(imx21, cur_frame, td);

		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
		etd_writel(imx21, etd_num, 2,
			(TD_NOTACCESSED << DW2_COMPCODE) |
			((td->frame & 0xFFFF) << DW2_STARTFRM));
		etd_writel(imx21, etd_num, 3,
			(TD_NOTACCESSED << DW3_COMPCODE0) |
			(td->len << DW3_PKTLEN0));

		activate_etd(imx21, etd_num, dir);
	}
}
static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int etd_mask = 1 << etd_num;
	struct etd_priv *etd = imx21->etd + etd_num;
	struct urb *urb = etd->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td = etd->td;
	struct usb_host_endpoint *ep = etd->ep;
	int isoc_index = td->isoc_index;
	unsigned int pipe = urb->pipe;
	int dir_in = usb_pipein(pipe);
	int cc;
	int bytes_xfrd;

	disactivate_etd(imx21, etd_num);

	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

	/* Input doesn't always fill the buffer, don't generate an error
	 * when this happens.
	 */
	if (dir_in && (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc == TD_NOTACCESSED)
		bytes_xfrd = 0;

	debug_isoc_completed(imx21,
		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
	if (cc) {
		urb_priv->isoc_status = -EXDEV;
		dev_dbg(imx21->dev,
			"bad iso cc=0x%X frame=%d sched frame=%d "
			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
			cc, imx21_hc_get_frame(hcd), td->frame,
			bytes_xfrd, td->len, urb, etd_num, isoc_index);
	}

	if (dir_in) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		if (!etd->dma_handle)
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
	}

	urb->actual_length += bytes_xfrd;
	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

	etd->td = NULL;
	etd->urb = NULL;
	etd->ep = NULL;

	if (--urb_priv->isoc_remaining == 0)
		urb_done(hcd, urb, urb_priv->isoc_status);

	schedule_isoc_etds(hcd, ep);
}
static struct ep_priv *alloc_isoc_ep(
	struct imx21 *imx21, struct usb_host_endpoint *ep)
{
	struct ep_priv *ep_priv;
	int i;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; i++)
		ep_priv->etd[i] = -1;

	INIT_LIST_HEAD(&ep_priv->td_list);
	ep_priv->ep = ep;
	ep->hcpriv = ep_priv;
	return ep_priv;
}
static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
	int i, j;
	int etd_num;

	/* Allocate the ETDs if required */
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		if (ep_priv->etd[i] < 0) {
			etd_num = alloc_etd(imx21);
			if (etd_num < 0)
				goto alloc_etd_failed;

			ep_priv->etd[i] = etd_num;
			imx21->etd[etd_num].ep = ep_priv->ep;
		}
	}
	return 0;

alloc_etd_failed:
	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
	for (j = 0; j < i; j++) {
		free_etd(imx21, ep_priv->etd[j]);
		ep_priv->etd[j] = -1;
	}
	return -ENOMEM;
}
static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
				     struct usb_host_endpoint *ep,
				     struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct urb_priv *urb_priv;
	unsigned long flags;
	struct ep_priv *ep_priv;
	struct td *td = NULL;
	int i;
	int ret;
	int cur_frame;
	u16 maxpacket;

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (urb_priv == NULL)
		return -ENOMEM;

	urb_priv->isoc_td = kzalloc(
		sizeof(struct td) * urb->number_of_packets, mem_flags);
	if (urb_priv->isoc_td == NULL) {
		ret = -ENOMEM;
		goto alloc_td_failed;
	}

	spin_lock_irqsave(&imx21->lock, flags);

	if (ep->hcpriv == NULL) {
		ep_priv = alloc_isoc_ep(imx21, ep);
		if (ep_priv == NULL) {
			ret = -ENOMEM;
			goto alloc_ep_failed;
		}
	} else {
		ep_priv = ep->hcpriv;
	}

	ret = alloc_isoc_etds(imx21, ep_priv);
	if (ret)
		goto alloc_etd_failed;

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto link_failed;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	/* allocate data memory for largest packets if not already done */
	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	for (i = 0; i < NUM_ISO_ETDS; i++) {
		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
			/* not sure if this can really occur.... */
			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
				etd->dmem_size, maxpacket);
			ret = -EMSGSIZE;
			goto alloc_dmem_failed;
		}

		if (etd->dmem_size == 0) {
			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
			if (etd->dmem_offset < 0) {
				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
				ret = -EAGAIN;
				goto alloc_dmem_failed;
			}
			etd->dmem_size = maxpacket;
		}
	}

	/* calculate frame */
	cur_frame = imx21_hc_get_frame(hcd);
	i = 0;
	if (list_empty(&ep_priv->td_list)) {
		urb->start_frame = wrap_frame(cur_frame + 5);
	} else {
		urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
				struct td, list)->frame + urb->interval);

		if (frame_after(cur_frame, urb->start_frame)) {
			dev_dbg(imx21->dev,
				"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
				urb->start_frame, cur_frame,
				(urb->transfer_flags & URB_ISO_ASAP) != 0);
			i = DIV_ROUND_UP(wrap_frame(
					cur_frame - urb->start_frame),
					urb->interval);

			/* Treat underruns as if URB_ISO_ASAP was set */
			if ((urb->transfer_flags & URB_ISO_ASAP) ||
					i >= urb->number_of_packets) {
				urb->start_frame = wrap_frame(urb->start_frame
						+ i * urb->interval);
				i = 0;
			}
		}
	}

	/* set up transfers */
	urb_priv->isoc_remaining = urb->number_of_packets - i;
	td = urb_priv->isoc_td;
	for (; i < urb->number_of_packets; i++, td++) {
		unsigned int offset = urb->iso_frame_desc[i].offset;
		td->ep = ep;
		td->urb = urb;
		td->len = urb->iso_frame_desc[i].length;
		td->isoc_index = i;
		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
		td->dma_handle = urb->transfer_dma + offset;
		td->cpu_buffer = urb->transfer_buffer + offset;
		list_add_tail(&td->list, &ep_priv->td_list);
	}

	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
		urb->number_of_packets, urb->start_frame, td->frame);

	debug_urb_submitted(imx21, urb);
	schedule_isoc_etds(hcd, ep);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

alloc_dmem_failed:
	usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_etd_failed:
alloc_ep_failed:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv->isoc_td);

alloc_td_failed:
	kfree(urb_priv);
	return ret;
}
static void dequeue_isoc_urb(struct imx21 *imx21,
	struct urb *urb, struct ep_priv *ep_priv)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	struct td *td, *tmp;
	int i;

	if (urb_priv->active) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			int etd_num = ep_priv->etd[i];
			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
				struct etd_priv *etd = imx21->etd + etd_num;

				reset_etd(imx21, etd_num);
				free_dmem(imx21, etd);
			}
		}
	}

	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
		if (td->urb == urb) {
			dev_vdbg(imx21->dev, "removing td %p\n", td);
			list_del(&td->list);
		}
	}
}
/* =========================================== */
/* NON ISOC Handling ...			*/
/* =========================================== */
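
/*
 * Program the endpoint's single ETD for the next stage of a non-ISOC URB.
 * Control transfers walk the US_CTRL_SETUP -> US_CTRL_DATA -> US_CTRL_ACK
 * states; bulk OUT transfers needing a trailing zero length packet go
 * through US_BULK0.  If no data memory is available the ETD is parked on
 * queue_for_dmem and activated later from free_dmem().
 */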
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
	unsigned int pipe = urb->pipe;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
	int state = urb_priv->state;
	int etd_num = ep_priv->etd[0];
	struct etd_priv *etd;
	u32 count;
	u16 etd_buf_size;
	u16 maxpacket;
	u8 dir;
	u8 bufround;
	u8 datatoggle;
	u8 interval = 0;
	u8 relpolpos = 0;

	if (etd_num < 0) {
		dev_err(imx21->dev, "No valid ETD\n");
		return;
	}
	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

	etd = &imx21->etd[etd_num];
	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));

	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
		if (state == US_CTRL_SETUP) {
			dir = TD_DIR_SETUP;
			if (unsuitable_for_dma(urb->setup_dma))
				usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
					urb);
			etd->dma_handle = urb->setup_dma;
			etd->cpu_buffer = urb->setup_packet;
			bufround = 0;
			count = 8;
			datatoggle = TD_TOGGLE_DATA0;
		} else {	/* US_CTRL_ACK */
			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
			bufround = 0;
			count = 0;
			datatoggle = TD_TOGGLE_DATA1;
		}
	} else {
		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
		bufround = (dir == TD_DIR_IN) ? 1 : 0;
		if (unsuitable_for_dma(urb->transfer_dma))
			usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);

		etd->dma_handle = urb->transfer_dma;
		etd->cpu_buffer = urb->transfer_buffer;
		if (usb_pipebulk(pipe) && (state == US_BULK0))
			count = 0;
		else
			count = urb->transfer_buffer_length;

		if (usb_pipecontrol(pipe)) {
			datatoggle = TD_TOGGLE_DATA1;
		} else {
			if (usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe)))
				datatoggle = TD_TOGGLE_DATA1;
			else
				datatoggle = TD_TOGGLE_DATA0;
		}
	}

	etd->urb = urb;
	etd->ep = urb_priv->ep;
	etd->len = count;

	if (usb_pipeint(pipe)) {
		interval = urb->interval;
		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
	}

	/* Write ETD to device memory */
	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

	etd_writel(imx21, etd_num, 2,
		(u32) interval << DW2_POLINTERV |
		((u32) relpolpos << DW2_RELPOLPOS) |
		((u32) dir << DW2_DIRPID) |
		((u32) bufround << DW2_BUFROUND) |
		((u32) datatoggle << DW2_DATATOG) |
		((u32) TD_NOTACCESSED << DW2_COMPCODE));

	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
	   is smaller. Make sure we don't overrun the buffer!
	 */
	if (count && count < maxpacket)
		etd_buf_size = count;
	else
		etd_buf_size = maxpacket;

	etd_writel(imx21, etd_num, 3,
		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

	if (!count)
		etd->dma_handle = 0;

	/* allocate x and y buffer space at once */
	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
	if (etd->dmem_offset < 0) {
		/* Setup everything we can in HW and update when we get DMEM */
		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
		debug_urb_queued_for_dmem(imx21, urb);
		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
		return;
	}

	etd_writel(imx21, etd_num, 1,
		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
		(u32) etd->dmem_offset);

	urb_priv->active = 1;

	/* enable the ETD to kick off transfer */
	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
	activate_etd(imx21, etd_num, dir);
}
static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct etd_priv *etd = &imx21->etd[etd_num];
	struct urb *urb = etd->urb;
	u32 etd_mask = 1 << etd_num;
	struct urb_priv *urb_priv = urb->hcpriv;
	int dir;
	int cc;
	u32 bytes_xfrd;
	int etd_done;

	disactivate_etd(imx21, etd_num);

	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

	/* save toggle carry */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		      usb_pipeout(urb->pipe),
		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

	if (dir == TD_DIR_IN) {
		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

		if (etd->bounce_buffer) {
			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
			dma_unmap_single(imx21->dev,
				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
		} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
			memcpy_fromio(etd->cpu_buffer,
				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
				bytes_xfrd);
		}
	}

	kfree(etd->bounce_buffer);
	etd->bounce_buffer = NULL;
	free_dmem(imx21, etd);

	urb->error_count = 0;
	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (cc == TD_DATAUNDERRUN))
		cc = TD_CC_NOERROR;

	if (cc != 0)
		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

	etd_done = (cc_to_error[cc] != 0);	/* stop if error */

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		switch (urb_priv->state) {
		case US_CTRL_SETUP:
			if (urb->transfer_buffer_length > 0)
				urb_priv->state = US_CTRL_DATA;
			else
				urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_DATA:
			urb->actual_length += bytes_xfrd;
			urb_priv->state = US_CTRL_ACK;
			break;
		case US_CTRL_ACK:
			etd_done = 1;
			break;
		default:
			dev_err(imx21->dev,
				"Invalid pipe state %d\n", urb_priv->state);
			etd_done = 1;
			break;
		}
		break;

	case PIPE_BULK:
		urb->actual_length += bytes_xfrd;
		if ((urb_priv->state == US_BULK)
		    && (urb->transfer_flags & URB_ZERO_PACKET)
		    && urb->transfer_buffer_length > 0
		    && ((urb->transfer_buffer_length %
			 usb_maxpacket(urb->dev, urb->pipe,
				       usb_pipeout(urb->pipe))) == 0)) {
			/* need a 0-packet */
			urb_priv->state = US_BULK0;
		} else {
			etd_done = 1;
		}
		break;

	case PIPE_INTERRUPT:
		urb->actual_length += bytes_xfrd;
		etd_done = 1;
		break;
	}

	if (etd_done)
		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
	else {
		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
		schedule_nonisoc_etd(imx21, urb);
	}
}
static struct ep_priv *alloc_ep(void)
{
	int i;
	struct ep_priv *ep_priv;

	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
	if (!ep_priv)
		return NULL;

	for (i = 0; i < NUM_ISO_ETDS; ++i)
		ep_priv->etd[i] = -1;

	return ep_priv;
}
static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
				struct urb *urb, gfp_t mem_flags)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct urb_priv *urb_priv;
	struct ep_priv *ep_priv;
	struct etd_priv *etd;
	int ret;
	unsigned long flags;

	dev_vdbg(imx21->dev,
		"enqueue urb=%p ep=%p len=%d "
		"buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
		urb, ep,
		urb->transfer_buffer_length,
		urb->transfer_buffer, urb->transfer_dma,
		urb->setup_packet, urb->setup_dma);

	if (usb_pipeisoc(urb->pipe))
		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	spin_lock_irqsave(&imx21->lock, flags);

	ep_priv = ep->hcpriv;
	if (ep_priv == NULL) {
		ep_priv = alloc_ep();
		if (!ep_priv) {
			ret = -ENOMEM;
			goto failed_alloc_ep;
		}
		ep->hcpriv = ep_priv;
		ep_priv->ep = ep;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto failed_link;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;
	urb_priv->ep = ep;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		urb_priv->state = US_CTRL_SETUP;
		break;
	case PIPE_BULK:
		urb_priv->state = US_BULK;
		break;
	}

	debug_urb_submitted(imx21, urb);
	if (ep_priv->etd[0] < 0) {
		if (ep_priv->waiting_etd) {
			dev_dbg(imx21->dev,
				"no ETD available already queued %p\n",
				ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			goto out;
		}
		ep_priv->etd[0] = alloc_etd(imx21);
		if (ep_priv->etd[0] < 0) {
			dev_dbg(imx21->dev,
				"no ETD available queueing %p\n", ep_priv);
			debug_urb_queued_for_etd(imx21, urb);
			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
			ep_priv->waiting_etd = 1;
			goto out;
		}
	}

	/* Schedule if no URB already active for this endpoint */
	etd = &imx21->etd[ep_priv->etd[0]];
	if (etd->urb == NULL) {
		DEBUG_LOG_FRAME(imx21, etd, last_req);
		schedule_nonisoc_etd(imx21, urb);
	}

out:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

failed_link:
failed_alloc_ep:
	spin_unlock_irqrestore(&imx21->lock, flags);
	kfree(urb_priv);
	return ret;
}
static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				int status)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct usb_host_endpoint *ep;
	struct ep_priv *ep_priv;
	struct urb_priv *urb_priv = urb->hcpriv;
	int ret;

	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
		urb, usb_pipeisoc(urb->pipe), status);

	spin_lock_irqsave(&imx21->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto fail;
	ep = urb_priv->ep;
	ep_priv = ep->hcpriv;

	debug_urb_unlinked(imx21, urb);

	if (usb_pipeisoc(urb->pipe)) {
		dequeue_isoc_urb(imx21, urb, ep_priv);
		schedule_isoc_etds(hcd, ep);
	} else if (urb_priv->active) {
		int etd_num = ep_priv->etd[0];
		if (etd_num != -1) {
			struct etd_priv *etd = &imx21->etd[etd_num];

			disactivate_etd(imx21, etd_num);
			free_dmem(imx21, etd);
			etd->urb = NULL;
			kfree(etd->bounce_buffer);
			etd->bounce_buffer = NULL;
		}
	}

	urb_done(hcd, urb, status);

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&imx21->lock, flags);
	return ret;
}
/* =========================================== */
/* Interrupt dispatch				*/
/* =========================================== */
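
/*
 * Walk all ETDs on a done or SOF interrupt: completed ETDs are handed to
 * the ISOC or non-ISOC completion path, and the SOF path implements the
 * unblocking kludge described below for ETDs that completed without
 * setting ETDDONESTAT.
 */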
static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
	int etd_num;
	int enable_sof_int = 0;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
		u32 etd_mask = 1 << etd_num;
		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
		struct etd_priv *etd = &imx21->etd[etd_num];

		if (done) {
			DEBUG_LOG_FRAME(imx21, etd, last_int);
		} else {
			int cc;
			u32 dword0;
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait).
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (eg mass storage) even just test1 will hang without
 * it.
 */
			if (etd->active_count && !enabled) /* suspicious... */
				enable_sof_int = 1;

			if (!sof || enabled || !etd->active_count)
				continue;

			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
			if (cc == TD_NOTACCESSED)
				continue;

			if (++etd->active_count < 10)
				continue;

			dword0 = etd_readl(imx21, etd_num, 0);
			dev_dbg(imx21->dev,
				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
				etd_num, dword0 & 0x7F,
				(dword0 >> DW0_ENDPNT) & 0x0F,
				cc);

#ifdef DEBUG
			dev_dbg(imx21->dev,
				"frame: act=%d disact=%d"
				" int=%d req=%d cur=%d\n",
				etd->activated_frame,
				etd->disactivated_frame,
				etd->last_int_frame,
				etd->last_req_frame,
				readl(imx21->regs + USBH_FRMNUB));
			imx21->debug_unblocks++;
#endif
			etd->active_count = 0;
/* End of kludge */
		}

		if (etd->ep == NULL || etd->urb == NULL) {
			dev_dbg(imx21->dev,
				"Interrupt for unexpected etd %d"
				" ep=%p urb=%p\n",
				etd_num, etd->ep, etd->urb);
			disactivate_etd(imx21, etd_num);
			continue;
		}

		if (usb_pipeisoc(etd->urb->pipe))
			isoc_etd_done(hcd, etd_num);
		else
			nonisoc_etd_done(hcd, etd_num);
	}

	/* only enable SOF interrupt if it may be needed for the kludge */
	if (enable_sof_int)
		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
	else
		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);

	spin_unlock_irqrestore(&imx21->lock, flags);
}
static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	u32 ints = readl(imx21->regs + USBH_SYSISR);

	if (ints & USBH_SYSIEN_HERRINT)
		dev_dbg(imx21->dev, "Scheduling error\n");

	if (ints & USBH_SYSIEN_SORINT)
		dev_dbg(imx21->dev, "Scheduling overrun\n");

	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

	writel(ints, imx21->regs + USBH_SYSISR);
	return IRQ_HANDLED;
}
static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
				      struct usb_host_endpoint *ep)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	struct ep_priv *ep_priv;
	int i;

	if (ep == NULL)
		return;

	spin_lock_irqsave(&imx21->lock, flags);
	ep_priv = ep->hcpriv;
	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

	if (!list_empty(&ep->urb_list))
		dev_dbg(imx21->dev, "ep's URB list is not empty\n");

	if (ep_priv != NULL) {
		for (i = 0; i < NUM_ISO_ETDS; i++) {
			if (ep_priv->etd[i] > -1)
				dev_dbg(imx21->dev, "free etd %d for disable\n",
					ep_priv->etd[i]);

			free_etd(imx21, ep_priv->etd[i]);
		}
		kfree(ep_priv);
		ep->hcpriv = NULL;
	}

	for (i = 0; i < USB_NUM_ETD; i++) {
		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
			dev_err(imx21->dev,
				"Active etd %d for disabled ep=%p!\n", i, ep);
			free_etd(imx21, i);
		}
	}
	free_epdmem(imx21, ep);
	spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Hub handling					*/
/* =========================================== */
static int get_hub_descriptor(struct usb_hcd *hcd,
			      struct usb_hub_descriptor *desc)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	desc->bDescriptorType = 0x29;	/* HUB descriptor */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	desc->bDescLength = 9;
	desc->bPwrOn2PwrGood = 0;
	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
		0x0002 |	/* No power switching */
		0x0010 |	/* No over current protection */
		0);

	desc->u.hs.DeviceRemovable[0] = 1 << 1;
	desc->u.hs.DeviceRemovable[1] = ~0;
	return 0;
}
static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int ports;
	int changed = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);
	ports = readl(imx21->regs + USBH_ROOTHUBA)
		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
	if (ports > 7) {
		ports = 7;
		dev_err(imx21->dev, "ports %d > 7\n", ports);
	}
	for (i = 0; i < ports; i++) {
		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
			 (USBH_PORTSTAT_CONNECTSC |
			  USBH_PORTSTAT_PRTENBLSC |
			  USBH_PORTSTAT_PRTSTATSC |
			  USBH_PORTSTAT_OVRCURIC |
			  USBH_PORTSTAT_PRTRSTSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
		}
	}
	spin_unlock_irqrestore(&imx21->lock, flags);

	if (changed)
		dev_info(imx21->dev, "Hub status changed\n");
	return changed;
}
static int imx21_hc_hub_control(struct usb_hcd *hcd,
				u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	int rc = 0;
	u32 status_write = 0;

	switch (typeReq) {
	case ClearHubFeature:
		dev_dbg(imx21->dev, "ClearHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case ClearPortFeature:
		dev_dbg(imx21->dev, "ClearPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			dev_dbg(imx21->dev, "    ENABLE\n");
			status_write = USBH_PORTSTAT_CURCONST;
			break;
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTOVRCURI;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_LSDEVCON;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			dev_dbg(imx21->dev, "    C_ENABLE\n");
			status_write = USBH_PORTSTAT_PRTENBLSC;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			dev_dbg(imx21->dev, "    C_SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSTATSC;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			dev_dbg(imx21->dev, "    C_CONNECTION\n");
			status_write = USBH_PORTSTAT_CONNECTSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
			status_write = USBH_PORTSTAT_OVRCURIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			dev_dbg(imx21->dev, "    C_RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTSC;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case GetHubDescriptor:
		dev_dbg(imx21->dev, "GetHubDescriptor\n");
		rc = get_hub_descriptor(hcd, (void *)buf);
		break;

	case GetHubStatus:
		dev_dbg(imx21->dev, "  GetHubStatus\n");
		*(__le32 *) buf = 0;
		break;

	case GetPortStatus:
		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
		    wIndex, USBH_PORTSTAT(wIndex - 1));
		*(__le32 *) buf = readl(imx21->regs +
			USBH_PORTSTAT(wIndex - 1));
		break;

	case SetHubFeature:
		dev_dbg(imx21->dev, "SetHubFeature\n");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
			break;
		case C_HUB_LOCAL_POWER:
			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	case SetPortFeature:
		dev_dbg(imx21->dev, "SetPortFeature\n");
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			dev_dbg(imx21->dev, "    SUSPEND\n");
			status_write = USBH_PORTSTAT_PRTSUSPST;
			break;
		case USB_PORT_FEAT_POWER:
			dev_dbg(imx21->dev, "    POWER\n");
			status_write = USBH_PORTSTAT_PRTPWRST;
			break;
		case USB_PORT_FEAT_RESET:
			dev_dbg(imx21->dev, "    RESET\n");
			status_write = USBH_PORTSTAT_PRTRSTST;
			break;
		default:
			dev_dbg(imx21->dev, "    unknown\n");
			rc = -EINVAL;
			break;
		}
		break;

	default:
		dev_dbg(imx21->dev, "  unknown\n");
		rc = -EINVAL;
		break;
	}

	if (status_write)
		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
	return rc;
}
/* =========================================== */
/* Host controller management			*/
/* =========================================== */
static int imx21_hc_reset(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long timeout;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	/* Reset the Host controller modules */
	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
		imx21->regs + USBOTG_RST_CTRL);

	/* Wait for reset to finish */
	timeout = jiffies + HZ;
	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
		if (time_after(jiffies, timeout)) {
			spin_unlock_irqrestore(&imx21->lock, flags);
			dev_err(imx21->dev, "timeout waiting for reset\n");
			return -ETIMEDOUT;
		}
		spin_unlock_irq(&imx21->lock);
		schedule_timeout_uninterruptible(1);
		spin_lock_irq(&imx21->lock);
	}
	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}
static int imx21_hc_start(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;
	int i, j;
	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
	u32 usb_control = 0;

	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
			USBOTG_HWMODE_HOSTXCVR_MASK);
	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
			USBOTG_HWMODE_OTGXCVR_MASK);

	if (imx21->pdata->host1_txenoe)
		usb_control |= USBCTRL_HOST1_TXEN_OE;

	if (!imx21->pdata->host1_xcverless)
		usb_control |= USBCTRL_HOST1_BYP_TLL;

	if (imx21->pdata->otg_ext_xcvr)
		usb_control |= USBCTRL_OTC_RCV_RXDP;

	spin_lock_irqsave(&imx21->lock, flags);

	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
		imx21->regs + USBOTG_CLK_CTRL);
	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
	writel(usb_control, imx21->regs + USBCTRL);
	writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
		imx21->regs + USB_MISCCONTROL);

	/* Clear the ETDs */
	for (i = 0; i < USB_NUM_ETD; i++)
		for (j = 0; j < 4; j++)
			etd_writel(imx21, i, j, 0);

	/* Take the HC out of reset */
	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
		imx21->regs + USBH_HOST_CTRL);

	if (imx21->pdata->enable_otg_host)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(0));

	if (imx21->pdata->enable_host1)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(1));

	if (imx21->pdata->enable_host2)
		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
			imx21->regs + USBH_PORTSTAT(2));

	hcd->state = HC_STATE_RUNNING;

	/* Enable host controller interrupts */
	set_register_bits(imx21, USBH_SYSIEN,
		USBH_SYSIEN_HERRINT |
		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
static void imx21_hc_stop(struct usb_hcd *hcd)
{
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	writel(0, imx21->regs + USBH_SYSIEN);
	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
	clear_register_bits(imx21, USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN,
					USBOTG_CLK_CTRL);
	spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Driver glue					*/
/* =========================================== */
static struct hc_driver imx21_hc_driver = {
	.description = hcd_name,
	.product_desc = "IMX21 USB Host Controller",
	.hcd_priv_size = sizeof(struct imx21),

	.flags = HCD_USB11,
	.irq = imx21_irq,

	.reset = imx21_hc_reset,
	.start = imx21_hc_start,
	.stop = imx21_hc_stop,

	/* I/O requests */
	.urb_enqueue = imx21_hc_urb_enqueue,
	.urb_dequeue = imx21_hc_urb_dequeue,
	.endpoint_disable = imx21_hc_endpoint_disable,

	/* scheduling support */
	.get_frame_number = imx21_hc_get_frame,

	/* Root hub support */
	.hub_status_data = imx21_hc_hub_status_data,
	.hub_control = imx21_hc_hub_control,
};
static struct mx21_usbh_platform_data default_pdata = {
	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
	.enable_host1 = 1,
	.enable_host2 = 1,
	.enable_otg_host = 1,
};
static int imx21_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct imx21 *imx21 = hcd_to_imx21(hcd);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	remove_debug_files(imx21);
	usb_remove_hcd(hcd);

	if (res != NULL) {
		clk_disable_unprepare(imx21->clk);
		clk_put(imx21->clk);
		iounmap(imx21->regs);
		release_mem_region(res->start, resource_size(res));
	}

	usb_put_hcd(hcd);
	return 0;
}
static int imx21_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct imx21 *imx21;
	struct resource *res;
	int ret;
	int irq;

	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	hcd = usb_create_hcd(&imx21_hc_driver,
		&pdev->dev, dev_name(&pdev->dev));
	if (hcd == NULL) {
		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
		    dev_name(&pdev->dev));
		return -ENOMEM;
	}

	imx21 = hcd_to_imx21(hcd);
	imx21->hcd = hcd;
	imx21->dev = &pdev->dev;
	imx21->pdata = dev_get_platdata(&pdev->dev);
	if (!imx21->pdata)
		imx21->pdata = &default_pdata;

	spin_lock_init(&imx21->lock);
	INIT_LIST_HEAD(&imx21->dmem_list);
	INIT_LIST_HEAD(&imx21->queue_for_etd);
	INIT_LIST_HEAD(&imx21->queue_for_dmem);
	create_debug_files(imx21);

	res = request_mem_region(res->start, resource_size(res), hcd_name);
	if (!res) {
		ret = -EBUSY;
		goto failed_request_mem;
	}

	imx21->regs = ioremap(res->start, resource_size(res));
	if (imx21->regs == NULL) {
		dev_err(imx21->dev, "Cannot map registers\n");
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* Enable clocks source */
	imx21->clk = clk_get(imx21->dev, NULL);
	if (IS_ERR(imx21->clk)) {
		dev_err(imx21->dev, "no clock found\n");
		ret = PTR_ERR(imx21->clk);
		goto failed_clock_get;
	}

	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
	if (ret)
		goto failed_clock_set;
	ret = clk_prepare_enable(imx21->clk);
	if (ret)
		goto failed_clock_enable;

	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

	ret = usb_add_hcd(hcd, irq, 0);
	if (ret != 0) {
		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
		goto failed_add_hcd;
	}

	return 0;

failed_add_hcd:
	clk_disable_unprepare(imx21->clk);
failed_clock_enable:
failed_clock_set:
	clk_put(imx21->clk);
failed_clock_get:
	iounmap(imx21->regs);
failed_ioremap:
	release_mem_region(res->start, resource_size(res));
failed_request_mem:
	remove_debug_files(imx21);
	usb_put_hcd(hcd);
	return ret;
}
static struct platform_driver imx21_hcd_driver = {
	.driver = {
		.name = (char *)hcd_name,
	},
	.probe = imx21_probe,
	.remove = imx21_remove,
};

module_platform_driver(imx21_hcd_driver);

MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");