/*
 * USB Host Controller Driver for IMX21
 *
 * Copyright (C) 2006 Loping Dog Embedded Systems
 * Copyright (C) 2009 Martin Fuzzey
 * Originally written by Jay Monkman <jtm@lopingdog.com>
 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The i.MX21 USB hardware contains
 * * 32 transfer descriptors (called ETDs)
 * * 4Kb of Data memory
 *
 * The data memory is shared between the host and function controllers
 * (but this driver only supports the host controller)
 *
 * So setting up a transfer involves:
 * * Allocating an ETD
 * * Filling in the ETD with the appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Activating the ETD
 * * Getting an interrupt when done.
 *
 * An ETD is assigned to each active endpoint.
 *
 * Low resource (ETD and Data memory) situations are handled differently for
 * isochronous and non isochronous transactions:
 *
 * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
 *
 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
 * They allocate both ETDs and Data memory during URB submission
 * (and fail if unavailable).
 */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "../core/hcd.h"
#include "imx21-hcd.h"

#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
    (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif
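
/*
 * Note: DEBUG_LOG_FRAME() pastes the event name into a field of struct
 * etd_priv (eg "activated" becomes etd->activated_frame), so each logged
 * event is assumed to have a matching <event>_frame member available
 * when DEBUG is defined; it compiles away entirely otherwise.
 */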

static const char hcd_name[] = "imx21-hcd";

static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
    return (struct imx21 *)hcd->hcd_priv;
}

/* =========================================== */
/* Hardware access helpers                     */
/* =========================================== */

static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
    void __iomem *reg = imx21->regs + offset;
    writel(readl(reg) | mask, reg);
}

static inline void clear_register_bits(struct imx21 *imx21,
    u32 offset, u32 mask)
{
    void __iomem *reg = imx21->regs + offset;
    writel(readl(reg) & ~mask, reg);
}

static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
    void __iomem *reg = imx21->regs + offset;

    if (readl(reg) & mask)
        writel(mask, reg);
}

static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
    void __iomem *reg = imx21->regs + offset;

    if (!(readl(reg) & mask))
        writel(mask, reg);
}
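
/*
 * Note: the "toggle" status registers on the i.MX21 flip a bit each time
 * a 1 is written to it rather than simply setting or clearing it, so the
 * two helpers above read the current state first and only write the mask
 * when the bit actually needs to change.
 */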

static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
    writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
    return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}

static inline int wrap_frame(int counter)
{
    return counter & 0xFFFF;
}

static inline int frame_after(int frame, int after)
{
    /* handle wrapping like jiffies time_after */
    return (s16)((s16)after - (s16)frame) < 0;
}
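
/*
 * Example: the frame counter is only 16 bits wide, so comparisons must
 * allow for wrap-around.  frame_after(0x0002, 0xFFFE) is true because
 * (s16)0xFFFE - (s16)0x0002 = -4 < 0: frame 2 of the next cycle comes
 * "after" frame 0xFFFE even though it is numerically smaller.
 */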

static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);

    return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}

#include "imx21-dbg.c"

/* =========================================== */
/* ETD management                              */
/* =========================================== */

static int alloc_etd(struct imx21 *imx21)
{
    int i;
    struct etd_priv *etd = imx21->etd;

    for (i = 0; i < USB_NUM_ETD; i++, etd++) {
        if (etd->alloc == 0) {
            memset(etd, 0, sizeof(imx21->etd[0]));
            etd->alloc = 1;
            debug_etd_allocated(imx21);
            return i;
        }
    }
    return -1;
}

static void disactivate_etd(struct imx21 *imx21, int num)
{
    int etd_mask = (1 << num);
    struct etd_priv *etd = &imx21->etd[num];

    writel(etd_mask, imx21->regs + USBH_ETDENCLR);
    clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
    writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
    clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);

    etd->active_count = 0;

    DEBUG_LOG_FRAME(imx21, etd, disactivated);
}

static void reset_etd(struct imx21 *imx21, int num)
{
    struct etd_priv *etd = imx21->etd + num;
    int i;

    disactivate_etd(imx21, num);

    for (i = 0; i < 4; i++)
        etd_writel(imx21, num, i, 0);
    etd->urb = NULL;
    etd->ep = NULL;
    etd->td = NULL;
}

static void free_etd(struct imx21 *imx21, int num)
{
    if (num < 0)
        return;

    if (num >= USB_NUM_ETD) {
        dev_err(imx21->dev, "BAD etd=%d!\n", num);
        return;
    }
    if (imx21->etd[num].alloc == 0) {
        dev_err(imx21->dev, "ETD %d already free!\n", num);
        return;
    }

    debug_etd_freed(imx21);
    reset_etd(imx21, num);
    memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}

static void setup_etd_dword0(struct imx21 *imx21,
    int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
    etd_writel(imx21, etd_num, 0,
        ((u32) usb_pipedevice(urb->pipe) << DW0_ADDRESS) |
        ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
        ((u32) dir << DW0_DIRECT) |
        ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
            1 : 0) << DW0_SPEED) |
        ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
        ((u32) maxpacket << DW0_MAXPKTSIZ));
}
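
/*
 * Note: ETD DWORD0 packs the static endpoint description into one 32 bit
 * word: device address, endpoint number, transfer direction, low/full
 * speed flag, format (via the fmt_urb_to_etd[] pipe-type lookup) and the
 * maximum packet size.  The remaining DWORDs carry per-transfer state.
 */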

static void activate_etd(struct imx21 *imx21,
    int etd_num, dma_addr_t dma, u8 dir)
{
    u32 etd_mask = 1 << etd_num;
    struct etd_priv *etd = &imx21->etd[etd_num];

    clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
    set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
    clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
    clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);

    if (dma) {
        set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
        clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
        clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
        writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
        set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
    } else if (dir != TD_DIR_IN) {
        /* need to set for ZLP */
        set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
        set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
    }

    DEBUG_LOG_FRAME(imx21, etd, activated);

#ifdef DEBUG
    if (!etd->active_count) {
        int i;
        etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
        etd->disactivated_frame = -1;
        etd->last_int_frame = -1;
        etd->last_req_frame = -1;
        for (i = 0; i < 4; i++)
            etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
    }
#endif

    etd->active_count = 1;
    writel(etd_mask, imx21->regs + USBH_ETDENSET);
}

/* =========================================== */
/* Data memory management                      */
/* =========================================== */
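
/*
 * Note: the allocator below keeps imx21->dmem_list sorted by offset and
 * does a first-fit scan for a gap large enough for the request.  Sizes
 * are rounded up to a 4 byte multiple first: (~size + 1) & 0x3 is just
 * (-size) mod 4, so for example a 13 byte request becomes 16 bytes.
 */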

static int alloc_dmem(struct imx21 *imx21, unsigned int size,
              struct usb_host_endpoint *ep)
{
    unsigned int offset = 0;
    struct imx21_dmem_area *area;
    struct imx21_dmem_area *tmp;

    size += (~size + 1) & 0x3; /* Round to 4 byte multiple */

    if (size > DMEM_SIZE) {
        dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
            size, DMEM_SIZE);
        return -EMSGSIZE;
    }

    list_for_each_entry(tmp, &imx21->dmem_list, list) {
        if ((size + offset) < offset)
            goto fail;
        if ((size + offset) <= tmp->offset)
            break;
        offset = tmp->size + tmp->offset;
        if ((offset + size) > DMEM_SIZE)
            goto fail;
    }
    area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
    if (area == NULL)
        return -ENOMEM;

    area->ep = ep;
    area->offset = offset;
    area->size = size;
    list_add_tail(&area->list, &tmp->list);
    debug_dmem_allocated(imx21, size);
    return offset;

fail:
    return -EMSGSIZE;
}

/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
    struct etd_priv *etd, u32 dmem_offset)
{
    struct urb_priv *urb_priv = etd->urb->hcpriv;
    int etd_num = etd - &imx21->etd[0];
    u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
    u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

    dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
        etd_num);
    etd_writel(imx21, etd_num, 1,
        ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);

    urb_priv->active = 1;
    activate_etd(imx21, etd_num, etd->dma_handle, dir);
}

static void free_dmem(struct imx21 *imx21, int offset)
{
    struct imx21_dmem_area *area;
    struct etd_priv *etd, *tmp;
    int found = 0;

    list_for_each_entry(area, &imx21->dmem_list, list) {
        if (area->offset == offset) {
            debug_dmem_freed(imx21, area->size);
            list_del(&area->list);
            kfree(area);
            found = 1;
            break;
        }
    }

    if (!found) {
        dev_err(imx21->dev,
            "Trying to free unallocated DMEM %d\n", offset);
        return;
    }

    /* Try again to allocate memory for anything we've queued */
    list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
        offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
        if (offset >= 0) {
            list_del(&etd->queue);
            activate_queued_etd(imx21, etd, (u32)offset);
        }
    }
}

static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
    struct imx21_dmem_area *area, *tmp;

    list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
        if (area->ep == ep) {
            dev_err(imx21->dev,
                "Active DMEM %d for disabled ep=%p\n",
                area->offset, ep);
            list_del(&area->list);
            kfree(area);
        }
    }
}

/* =========================================== */
/* End handling                                */
/* =========================================== */

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);

/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
    int etd_num;
    int i;

    for (i = 0; i < NUM_ISO_ETDS; i++) {
        etd_num = ep_priv->etd[i];
        if (etd_num < 0)
            continue;

        ep_priv->etd[i] = -1;
        if (list_empty(&imx21->queue_for_etd)) {
            free_etd(imx21, etd_num);
            continue;
        }

        dev_dbg(imx21->dev,
            "assigning idle etd %d for queued request\n", etd_num);
        ep_priv = list_first_entry(&imx21->queue_for_etd,
            struct ep_priv, queue);
        list_del(&ep_priv->queue);
        reset_etd(imx21, etd_num);
        ep_priv->waiting_etd = 0;
        ep_priv->etd[i] = etd_num;

        if (list_empty(&ep_priv->ep->urb_list)) {
            dev_err(imx21->dev, "No urb for queued ep!\n");
            continue;
        }
        schedule_nonisoc_etd(imx21, list_first_entry(
            &ep_priv->ep->urb_list, struct urb, urb_list));
    }
}

static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    struct ep_priv *ep_priv = urb->ep->hcpriv;
    struct urb_priv *urb_priv = urb->hcpriv;

    debug_urb_completed(imx21, urb, status);
    dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);

    kfree(urb_priv->isoc_td);
    kfree(urb->hcpriv);
    urb->hcpriv = NULL;
    usb_hcd_unlink_urb_from_ep(hcd, urb);
    spin_unlock(&imx21->lock);
    usb_hcd_giveback_urb(hcd, urb, status);
    spin_lock(&imx21->lock);
    if (list_empty(&ep_priv->ep->urb_list))
        ep_idle(imx21, ep_priv);
}

/* =========================================== */
/* ISOC Handling ...                           */
/* =========================================== */
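
/*
 * Note: each isochronous endpoint owns NUM_ISO_ETDS ETDs (two, giving
 * double buffering) allocated up front by alloc_isoc_ep().  Pending
 * packets sit on ep_priv->td_list in frame order and are fed to any
 * free ETD by schedule_isoc_etds() below.
 */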

static void schedule_isoc_etds(struct usb_hcd *hcd,
    struct usb_host_endpoint *ep)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    struct ep_priv *ep_priv = ep->hcpriv;
    struct etd_priv *etd;
    struct urb_priv *urb_priv;
    struct td *td;
    int etd_num;
    int i;
    int cur_frame;
    u8 dir;

    for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
        if (list_empty(&ep_priv->td_list))
            break;

        etd_num = ep_priv->etd[i];
        if (etd_num < 0)
            break;

        etd = &imx21->etd[etd_num];
        if (etd->urb)
            continue;

        td = list_entry(ep_priv->td_list.next, struct td, list);
        list_del(&td->list);
        urb_priv = td->urb->hcpriv;

        cur_frame = imx21_hc_get_frame(hcd);
        if (frame_after(cur_frame, td->frame)) {
            dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
                cur_frame, td->frame);
            urb_priv->isoc_status = -EXDEV;
            td->urb->iso_frame_desc[
                td->isoc_index].actual_length = 0;
            td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
            if (--urb_priv->isoc_remaining == 0)
                urb_done(hcd, td->urb, urb_priv->isoc_status);
            goto too_late;
        }

        urb_priv->active = 1;
        etd->td = td;
        etd->ep = td->ep;
        etd->urb = td->urb;
        etd->len = td->len;

        debug_isoc_submitted(imx21, cur_frame, td);

        dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
        setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
        etd_writel(imx21, etd_num, 1, etd->dmem_offset);
        etd_writel(imx21, etd_num, 2,
            (TD_NOTACCESSED << DW2_COMPCODE) |
            ((td->frame & 0xFFFF) << DW2_STARTFRM));
        etd_writel(imx21, etd_num, 3,
            (TD_NOTACCESSED << DW3_COMPCODE0) |
            (td->len << DW3_PKTLEN0));

        activate_etd(imx21, etd_num, td->data, dir);
    }
}

static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    int etd_mask = 1 << etd_num;
    struct urb_priv *urb_priv = urb->hcpriv;
    struct etd_priv *etd = imx21->etd + etd_num;
    struct td *td = etd->td;
    struct usb_host_endpoint *ep = etd->ep;
    int isoc_index = td->isoc_index;
    unsigned int pipe = urb->pipe;
    int dir_in = usb_pipein(pipe);
    int cc;
    int bytes_xfrd;

    disactivate_etd(imx21, etd_num);

    cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
    bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

    /* Input doesn't always fill the buffer, don't generate an error
     * when this happens.
     */
    if (dir_in && (cc == TD_DATAUNDERRUN))
        cc = TD_CC_NOERROR;

    if (cc == TD_NOTACCESSED)
        bytes_xfrd = 0;

    debug_isoc_completed(imx21,
        imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
    if (cc) {
        urb_priv->isoc_status = -EXDEV;
        dev_dbg(imx21->dev,
            "bad iso cc=0x%X frame=%d sched frame=%d "
            "cnt=%d len=%d urb=%p etd=%d index=%d\n",
            cc, imx21_hc_get_frame(hcd), td->frame,
            bytes_xfrd, td->len, urb, etd_num, isoc_index);
    }

    if (dir_in)
        clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);

    urb->actual_length += bytes_xfrd;
    urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
    urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];

    etd->td = NULL;
    etd->urb = NULL;
    etd->ep = NULL;

    if (--urb_priv->isoc_remaining == 0)
        urb_done(hcd, urb, urb_priv->isoc_status);

    schedule_isoc_etds(hcd, ep);
}

static struct ep_priv *alloc_isoc_ep(
    struct imx21 *imx21, struct usb_host_endpoint *ep)
{
    struct ep_priv *ep_priv;
    int i;

    ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
    if (ep_priv == NULL)
        return NULL;

    /* Allocate the ETDs */
    for (i = 0; i < NUM_ISO_ETDS; i++) {
        ep_priv->etd[i] = alloc_etd(imx21);
        if (ep_priv->etd[i] < 0) {
            int j;
            dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
            for (j = 0; j < i; j++)
                free_etd(imx21, ep_priv->etd[j]);
            goto alloc_etd_failed;
        }
        imx21->etd[ep_priv->etd[i]].ep = ep;
    }

    INIT_LIST_HEAD(&ep_priv->td_list);
    ep_priv->ep = ep;
    ep->hcpriv = ep_priv;
    return ep_priv;

alloc_etd_failed:
    kfree(ep_priv);
    return NULL;
}

static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
                     struct usb_host_endpoint *ep,
                     struct urb *urb, gfp_t mem_flags)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    struct urb_priv *urb_priv;
    unsigned long flags;
    struct ep_priv *ep_priv;
    struct td *td = NULL;
    int i;
    int ret;
    int cur_frame;
    u16 maxpacket;

    urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
    if (urb_priv == NULL)
        return -ENOMEM;

    urb_priv->isoc_td = kzalloc(
        sizeof(struct td) * urb->number_of_packets, mem_flags);
    if (urb_priv->isoc_td == NULL) {
        ret = -ENOMEM;
        goto alloc_td_failed;
    }

    spin_lock_irqsave(&imx21->lock, flags);

    if (ep->hcpriv == NULL) {
        ep_priv = alloc_isoc_ep(imx21, ep);
        if (ep_priv == NULL) {
            ret = -ENOMEM;
            goto alloc_ep_failed;
        }
    } else {
        ep_priv = ep->hcpriv;
    }

    ret = usb_hcd_link_urb_to_ep(hcd, urb);
    if (ret)
        goto link_failed;

    urb->status = -EINPROGRESS;
    urb->actual_length = 0;
    urb->error_count = 0;
    urb->hcpriv = urb_priv;
    urb_priv->ep = ep;

    /* allocate data memory for largest packets if not already done */
    maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
    for (i = 0; i < NUM_ISO_ETDS; i++) {
        struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

        if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
            /* not sure if this can really occur.... */
            dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
                etd->dmem_size, maxpacket);
            ret = -EMSGSIZE;
            goto alloc_dmem_failed;
        }

        if (etd->dmem_size == 0) {
            etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
            if (etd->dmem_offset < 0) {
                dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
                ret = -EAGAIN;
                goto alloc_dmem_failed;
            }
            etd->dmem_size = maxpacket;
        }
    }

    /* calculate frame */
    cur_frame = imx21_hc_get_frame(hcd);
    if (urb->transfer_flags & URB_ISO_ASAP) {
        if (list_empty(&ep_priv->td_list))
            urb->start_frame = cur_frame + 5;
        else
            urb->start_frame = list_entry(
                ep_priv->td_list.prev,
                struct td, list)->frame + urb->interval;
    }
    urb->start_frame = wrap_frame(urb->start_frame);
    if (frame_after(cur_frame, urb->start_frame)) {
        dev_dbg(imx21->dev,
            "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
            urb->start_frame, cur_frame,
            (urb->transfer_flags & URB_ISO_ASAP) != 0);
        urb->start_frame = wrap_frame(cur_frame + 1);
    }

    /* set up transfers */
    td = urb_priv->isoc_td;
    for (i = 0; i < urb->number_of_packets; i++, td++) {
        td->ep = ep;
        td->urb = urb;
        td->len = urb->iso_frame_desc[i].length;
        td->isoc_index = i;
        td->frame = wrap_frame(urb->start_frame + urb->interval * i);
        td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
        list_add_tail(&td->list, &ep_priv->td_list);
    }

    urb_priv->isoc_remaining = urb->number_of_packets;
    dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
        urb->number_of_packets, urb->start_frame, (td - 1)->frame);

    debug_urb_submitted(imx21, urb);
    schedule_isoc_etds(hcd, ep);

    spin_unlock_irqrestore(&imx21->lock, flags);
    return 0;

alloc_dmem_failed:
    usb_hcd_unlink_urb_from_ep(hcd, urb);

link_failed:
alloc_ep_failed:
    spin_unlock_irqrestore(&imx21->lock, flags);
    kfree(urb_priv->isoc_td);

alloc_td_failed:
    kfree(urb_priv);
    return ret;
}

static void dequeue_isoc_urb(struct imx21 *imx21,
    struct urb *urb, struct ep_priv *ep_priv)
{
    struct urb_priv *urb_priv = urb->hcpriv;
    struct td *td, *tmp;
    int i;

    if (urb_priv->active) {
        for (i = 0; i < NUM_ISO_ETDS; i++) {
            int etd_num = ep_priv->etd[i];
            if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
                struct etd_priv *etd = imx21->etd + etd_num;

                reset_etd(imx21, etd_num);
                if (etd->dmem_size)
                    free_dmem(imx21, etd->dmem_offset);
                etd->dmem_size = 0;
            }
        }
    }

    list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
        if (td->urb == urb) {
            dev_vdbg(imx21->dev, "removing td %p\n", td);
            list_del(&td->list);
        }
    }
}

/* =========================================== */
/* NON ISOC Handling ...                       */
/* =========================================== */
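
/*
 * Note: control transfers are driven through a small state machine in
 * urb_priv->state: US_CTRL_SETUP -> US_CTRL_DATA (skipped when there is
 * no data stage) -> US_CTRL_ACK.  Bulk URBs that need a trailing zero
 * length packet go US_BULK -> US_BULK0.  schedule_nonisoc_etd() programs
 * one stage at a time and nonisoc_etd_done() advances the state.
 */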

static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
    unsigned int pipe = urb->pipe;
    struct urb_priv *urb_priv = urb->hcpriv;
    struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
    int state = urb_priv->state;
    int etd_num = ep_priv->etd[0];
    struct etd_priv *etd;
    int dmem_offset;
    u32 count;
    u16 etd_buf_size;
    u16 maxpacket;
    u8 dir;
    u8 bufround;
    u8 datatoggle;
    u8 interval = 0;
    u8 relpolpos = 0;

    if (etd_num < 0) {
        dev_err(imx21->dev, "No valid ETD\n");
        return;
    }
    if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
        dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

    etd = &imx21->etd[etd_num];
    maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));

    if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
        if (state == US_CTRL_SETUP) {
            dir = TD_DIR_SETUP;
            etd->dma_handle = urb->setup_dma;
            bufround = 0;
            count = 8;
            datatoggle = TD_TOGGLE_DATA0;
        } else {    /* US_CTRL_ACK */
            dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
            etd->dma_handle = urb->transfer_dma;
            bufround = 0;
            count = 0;
            datatoggle = TD_TOGGLE_DATA1;
        }
    } else {
        dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
        bufround = (dir == TD_DIR_IN) ? 1 : 0;
        etd->dma_handle = urb->transfer_dma;
        if (usb_pipebulk(pipe) && (state == US_BULK0))
            count = 0;
        else
            count = urb->transfer_buffer_length;

        if (usb_pipecontrol(pipe)) {
            datatoggle = TD_TOGGLE_DATA1;
        } else {
            if (usb_gettoggle(urb->dev,
                    usb_pipeendpoint(urb->pipe),
                    usb_pipeout(urb->pipe)))
                datatoggle = TD_TOGGLE_DATA1;
            else
                datatoggle = TD_TOGGLE_DATA0;
        }
    }

    etd->urb = urb;
    etd->ep = urb_priv->ep;
    etd->len = count;

    if (usb_pipeint(pipe)) {
        interval = urb->interval;
        relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
    }

    /* Write ETD to device memory */
    setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

    etd_writel(imx21, etd_num, 2,
        ((u32) interval << DW2_POLINTERV) |
        ((u32) relpolpos << DW2_RELPOLPOS) |
        ((u32) dir << DW2_DIRPID) |
        ((u32) bufround << DW2_BUFROUND) |
        ((u32) datatoggle << DW2_DATATOG) |
        ((u32) TD_NOTACCESSED << DW2_COMPCODE));

    /* DMA will always transfer buffer size even if TOBYCNT in DWORD3
       is smaller. Make sure we don't overrun the buffer!
     */
    if (count && count < maxpacket)
        etd_buf_size = count;
    else
        etd_buf_size = maxpacket;

    etd_writel(imx21, etd_num, 3,
        ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);

    if (!count)
        etd->dma_handle = 0;

    /* allocate x and y buffer space at once */
    etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
    dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
    if (dmem_offset < 0) {
        /* Setup everything we can in HW and update when we get DMEM */
        etd_writel(imx21, etd_num, 1, (u32) maxpacket << 16);

        dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
        debug_urb_queued_for_dmem(imx21, urb);
        list_add_tail(&etd->queue, &imx21->queue_for_dmem);
        return;
    }

    etd_writel(imx21, etd_num, 1,
        (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
        (u32) dmem_offset);

    urb_priv->active = 1;

    /* enable the ETD to kick off transfer */
    dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
        etd_num, count, dir != TD_DIR_IN ? "out" : "in");
    activate_etd(imx21, etd_num, etd->dma_handle, dir);
}
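
/*
 * Note: when alloc_dmem() fails above, the ETD is parked on
 * imx21->queue_for_dmem with everything except DWORD1 fully programmed;
 * free_dmem() retries the allocation later and completes the setup via
 * activate_queued_etd().
 */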

static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    struct etd_priv *etd = &imx21->etd[etd_num];
    u32 etd_mask = 1 << etd_num;
    struct urb_priv *urb_priv = urb->hcpriv;
    int dir;
    u16 xbufaddr;
    int cc;
    u32 bytes_xfrd;
    int etd_done;

    disactivate_etd(imx21, etd_num);

    dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
    xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
    cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
    bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

    /* save toggle carry */
    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
              usb_pipeout(urb->pipe),
              (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

    if (dir == TD_DIR_IN) {
        clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
        clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
    }
    free_dmem(imx21, xbufaddr);

    urb->error_count = 0;
    if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
            && (cc == TD_DATAUNDERRUN))
        cc = TD_CC_NOERROR;

    if (cc != 0)
        dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);

    etd_done = (cc_to_error[cc] != 0); /* stop if error */

    switch (usb_pipetype(urb->pipe)) {
    case PIPE_CONTROL:
        switch (urb_priv->state) {
        case US_CTRL_SETUP:
            if (urb->transfer_buffer_length > 0)
                urb_priv->state = US_CTRL_DATA;
            else
                urb_priv->state = US_CTRL_ACK;
            break;
        case US_CTRL_DATA:
            urb->actual_length += bytes_xfrd;
            urb_priv->state = US_CTRL_ACK;
            break;
        case US_CTRL_ACK:
            etd_done = 1;
            break;
        default:
            dev_err(imx21->dev,
                "Invalid pipe state %d\n", urb_priv->state);
            etd_done = 1;
            break;
        }
        break;

    case PIPE_BULK:
        urb->actual_length += bytes_xfrd;
        if ((urb_priv->state == US_BULK)
            && (urb->transfer_flags & URB_ZERO_PACKET)
            && urb->transfer_buffer_length > 0
            && ((urb->transfer_buffer_length %
                usb_maxpacket(urb->dev, urb->pipe,
                    usb_pipeout(urb->pipe))) == 0)) {
            /* need a 0-packet */
            urb_priv->state = US_BULK0;
        } else {
            etd_done = 1;
        }
        break;

    case PIPE_INTERRUPT:
        urb->actual_length += bytes_xfrd;
        etd_done = 1;
        break;
    }

    if (!etd_done) {
        dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
        schedule_nonisoc_etd(imx21, urb);
    } else {
        struct usb_host_endpoint *ep = urb->ep;

        urb_done(hcd, urb, cc_to_error[cc]);
        etd->urb = NULL;

        if (!list_empty(&ep->urb_list)) {
            urb = list_first_entry(&ep->urb_list,
                    struct urb, urb_list);
            dev_vdbg(imx21->dev, "next URB %p\n", urb);
            schedule_nonisoc_etd(imx21, urb);
        }
    }
}

static struct ep_priv *alloc_ep(void)
{
    int i;
    struct ep_priv *ep_priv;

    ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
    if (!ep_priv)
        return NULL;

    for (i = 0; i < NUM_ISO_ETDS; ++i)
        ep_priv->etd[i] = -1;

    return ep_priv;
}

static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
                struct urb *urb, gfp_t mem_flags)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    struct usb_host_endpoint *ep = urb->ep;
    struct urb_priv *urb_priv;
    struct ep_priv *ep_priv;
    struct etd_priv *etd;
    int ret;
    unsigned long flags;

    dev_vdbg(imx21->dev,
        "enqueue urb=%p ep=%p len=%d "
        "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
        urb, ep,
        urb->transfer_buffer_length,
        urb->transfer_buffer, urb->transfer_dma,
        urb->setup_packet, urb->setup_dma);

    if (usb_pipeisoc(urb->pipe))
        return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

    urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
    if (urb_priv == NULL)
        return -ENOMEM;

    spin_lock_irqsave(&imx21->lock, flags);

    ep_priv = ep->hcpriv;
    if (ep_priv == NULL) {
        ep_priv = alloc_ep();
        if (!ep_priv) {
            ret = -ENOMEM;
            goto failed_alloc_ep;
        }
        ep->hcpriv = ep_priv;
        ep_priv->ep = ep;
    }

    ret = usb_hcd_link_urb_to_ep(hcd, urb);
    if (ret)
        goto failed_link;

    urb->status = -EINPROGRESS;
    urb->actual_length = 0;
    urb->error_count = 0;
    urb->hcpriv = urb_priv;
    urb_priv->ep = ep;

    switch (usb_pipetype(urb->pipe)) {
    case PIPE_CONTROL:
        urb_priv->state = US_CTRL_SETUP;
        break;
    case PIPE_BULK:
        urb_priv->state = US_BULK;
        break;
    }

    debug_urb_submitted(imx21, urb);
    if (ep_priv->etd[0] < 0) {
        if (ep_priv->waiting_etd) {
            dev_dbg(imx21->dev,
                "no ETD available already queued %p\n",
                ep_priv);
            debug_urb_queued_for_etd(imx21, urb);
            goto out;
        }
        ep_priv->etd[0] = alloc_etd(imx21);
        if (ep_priv->etd[0] < 0) {
            dev_dbg(imx21->dev,
                "no ETD available queueing %p\n", ep_priv);
            debug_urb_queued_for_etd(imx21, urb);
            list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
            ep_priv->waiting_etd = 1;
            goto out;
        }
    }

    /* Schedule if no URB already active for this endpoint */
    etd = &imx21->etd[ep_priv->etd[0]];
    if (etd->urb == NULL) {
        DEBUG_LOG_FRAME(imx21, etd, last_req);
        schedule_nonisoc_etd(imx21, urb);
    }

out:
    spin_unlock_irqrestore(&imx21->lock, flags);
    return 0;

failed_link:
failed_alloc_ep:
    spin_unlock_irqrestore(&imx21->lock, flags);
    kfree(urb_priv);
    return ret;
}

static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
                int status)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    unsigned long flags;
    struct usb_host_endpoint *ep;
    struct ep_priv *ep_priv;
    struct urb_priv *urb_priv = urb->hcpriv;
    int ret;

    dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
        urb, usb_pipeisoc(urb->pipe), status);

    spin_lock_irqsave(&imx21->lock, flags);

    ret = usb_hcd_check_unlink_urb(hcd, urb, status);
    if (ret)
        goto fail;
    ep = urb_priv->ep;
    ep_priv = ep->hcpriv;

    debug_urb_unlinked(imx21, urb);

    if (usb_pipeisoc(urb->pipe)) {
        dequeue_isoc_urb(imx21, urb, ep_priv);
        schedule_isoc_etds(hcd, ep);
    } else if (urb_priv->active) {
        int etd_num = ep_priv->etd[0];
        if (etd_num != -1) {
            disactivate_etd(imx21, etd_num);
            free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
            imx21->etd[etd_num].urb = NULL;
        }
    }

    urb_done(hcd, urb, status);

    spin_unlock_irqrestore(&imx21->lock, flags);
    return 0;

fail:
    spin_unlock_irqrestore(&imx21->lock, flags);
    return ret;
}

/* =========================================== */
/* Interrupt dispatch                          */
/* =========================================== */
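
/*
 * Note: imx21_irq() below reads USBH_SYSISR once and lets process_etds()
 * walk all 32 ETDs on both DONEINT and SOFINT.  The SOF interrupt is only
 * left enabled while a potentially blocked ETD exists, so the unblocking
 * kludge (see comment inside process_etds) does not cost a wakeup for
 * every millisecond full-speed frame.
 */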

static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
    int etd_num;
    int enable_sof_int = 0;
    unsigned long flags;

    spin_lock_irqsave(&imx21->lock, flags);

    for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
        u32 etd_mask = 1 << etd_num;
        u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
        u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
        struct etd_priv *etd = &imx21->etd[etd_num];

        if (done) {
            DEBUG_LOG_FRAME(imx21, etd, last_int);
        } else {
/*
 * Kludge warning!
 *
 * When multiple transfers are using the bus we sometimes get into a state
 * where the transfer has completed (the CC field of the ETD is != 0x0F),
 * the ETD has self disabled but the ETDDONESTAT flag is not set
 * (and hence no interrupt occurs).
 * This causes the transfer in question to hang.
 * The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait)
 *
 * With a single active transfer the usbtest test suite will run for days
 * without the kludge.
 * With other bus activity (eg mass storage) even just test1 will hang without
 * the kludge.
 */
            int cc;
            u32 dword0;

            if (etd->active_count && !enabled) /* suspicious... */
                enable_sof_int = 1;

            if (!sof || enabled || !etd->active_count)
                continue;

            cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
            if (cc == TD_NOTACCESSED)
                continue;

            if (++etd->active_count < 10)
                continue;

            dword0 = etd_readl(imx21, etd_num, 0);
            dev_dbg(imx21->dev,
                "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
                etd_num, dword0 & 0x7F,
                (dword0 >> DW0_ENDPNT) & 0x0F,
                cc);

#ifdef DEBUG
            dev_dbg(imx21->dev,
                "frame: act=%d disact=%d"
                " int=%d req=%d cur=%d\n",
                etd->activated_frame,
                etd->disactivated_frame,
                etd->last_int_frame,
                etd->last_req_frame,
                readl(imx21->regs + USBH_FRMNUB));
            imx21->debug_unblocks++;
#endif
            etd->active_count = 0;
/* End of kludge */
        }

        if (etd->ep == NULL || etd->urb == NULL) {
            dev_dbg(imx21->dev,
                "Interrupt for unexpected etd %d"
                " ep=%p urb=%p\n",
                etd_num, etd->ep, etd->urb);
            disactivate_etd(imx21, etd_num);
            continue;
        }

        if (usb_pipeisoc(etd->urb->pipe))
            isoc_etd_done(hcd, etd->urb, etd_num);
        else
            nonisoc_etd_done(hcd, etd->urb, etd_num);
    }

    /* only enable SOF interrupt if it may be needed for the kludge */
    if (enable_sof_int)
        set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
    else
        clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);

    spin_unlock_irqrestore(&imx21->lock, flags);
}

static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    u32 ints = readl(imx21->regs + USBH_SYSISR);

    if (ints & USBH_SYSIEN_HERRINT)
        dev_dbg(imx21->dev, "Scheduling error\n");

    if (ints & USBH_SYSIEN_SORINT)
        dev_dbg(imx21->dev, "Scheduling overrun\n");

    if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
        process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);

    writel(ints, imx21->regs + USBH_SYSISR);
    return IRQ_HANDLED;
}

static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
                      struct usb_host_endpoint *ep)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    unsigned long flags;
    struct ep_priv *ep_priv;
    int i;

    if (ep == NULL)
        return;

    spin_lock_irqsave(&imx21->lock, flags);
    ep_priv = ep->hcpriv;
    dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

    if (!list_empty(&ep->urb_list))
        dev_dbg(imx21->dev, "ep's URB list is not empty\n");

    if (ep_priv != NULL) {
        for (i = 0; i < NUM_ISO_ETDS; i++) {
            if (ep_priv->etd[i] > -1)
                dev_dbg(imx21->dev, "free etd %d for disable\n",
                    ep_priv->etd[i]);

            free_etd(imx21, ep_priv->etd[i]);
        }
        kfree(ep_priv);
        ep->hcpriv = NULL;
    }

    for (i = 0; i < USB_NUM_ETD; i++) {
        if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
            dev_err(imx21->dev,
                "Active etd %d for disabled ep=%p!\n", i, ep);
            free_etd(imx21, i);
        }
    }
    free_epdmem(imx21, ep);
    spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Hub handling                                */
/* =========================================== */

static int get_hub_descriptor(struct usb_hcd *hcd,
                  struct usb_hub_descriptor *desc)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    desc->bDescriptorType = 0x29;   /* HUB descriptor */
    desc->bHubContrCurrent = 0;

    desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
        & USBH_ROOTHUBA_NDNSTMPRT_MASK;
    desc->bDescLength = 9;
    desc->bPwrOn2PwrGood = 0;
    desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
        0x0002 |    /* No power switching */
        0x0010 |    /* No over current protection */
        0);

    desc->bitmap[0] = 1 << 1;
    desc->bitmap[1] = ~0;
    return 0;
}

static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    int ports;
    int changed = 0;
    int i;
    unsigned long flags;

    spin_lock_irqsave(&imx21->lock, flags);
    ports = readl(imx21->regs + USBH_ROOTHUBA)
        & USBH_ROOTHUBA_NDNSTMPRT_MASK;
    if (ports > 7) {
        ports = 7;
        dev_err(imx21->dev, "ports %d > 7\n", ports);
    }
    for (i = 0; i < ports; i++) {
        if (readl(imx21->regs + USBH_PORTSTAT(i)) &
            (USBH_PORTSTAT_CONNECTSC |
            USBH_PORTSTAT_PRTENBLSC |
            USBH_PORTSTAT_PRTSTATSC |
            USBH_PORTSTAT_OVRCURIC |
            USBH_PORTSTAT_PRTRSTSC)) {

            changed = 1;
            buf[0] |= 1 << (i + 1);
        }
    }
    spin_unlock_irqrestore(&imx21->lock, flags);

    if (changed)
        dev_info(imx21->dev, "Hub status changed\n");
    return changed;
}

static int imx21_hc_hub_control(struct usb_hcd *hcd,
                u16 typeReq,
                u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    int rc = 0;
    u32 status_write = 0;

    switch (typeReq) {
    case ClearHubFeature:
        dev_dbg(imx21->dev, "ClearHubFeature\n");
        switch (wValue) {
        case C_HUB_OVER_CURRENT:
            dev_dbg(imx21->dev, "    OVER_CURRENT\n");
            break;
        case C_HUB_LOCAL_POWER:
            dev_dbg(imx21->dev, "    LOCAL_POWER\n");
            break;
        default:
            dev_dbg(imx21->dev, "    unknown\n");
            rc = -EINVAL;
            break;
        }
        break;

    case ClearPortFeature:
        dev_dbg(imx21->dev, "ClearPortFeature\n");
        switch (wValue) {
        case USB_PORT_FEAT_ENABLE:
            dev_dbg(imx21->dev, "    ENABLE\n");
            status_write = USBH_PORTSTAT_CURCONST;
            break;
        case USB_PORT_FEAT_SUSPEND:
            dev_dbg(imx21->dev, "    SUSPEND\n");
            status_write = USBH_PORTSTAT_PRTOVRCURI;
            break;
        case USB_PORT_FEAT_POWER:
            dev_dbg(imx21->dev, "    POWER\n");
            status_write = USBH_PORTSTAT_LSDEVCON;
            break;
        case USB_PORT_FEAT_C_ENABLE:
            dev_dbg(imx21->dev, "    C_ENABLE\n");
            status_write = USBH_PORTSTAT_PRTENBLSC;
            break;
        case USB_PORT_FEAT_C_SUSPEND:
            dev_dbg(imx21->dev, "    C_SUSPEND\n");
            status_write = USBH_PORTSTAT_PRTSTATSC;
            break;
        case USB_PORT_FEAT_C_CONNECTION:
            dev_dbg(imx21->dev, "    C_CONNECTION\n");
            status_write = USBH_PORTSTAT_CONNECTSC;
            break;
        case USB_PORT_FEAT_C_OVER_CURRENT:
            dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
            status_write = USBH_PORTSTAT_OVRCURIC;
            break;
        case USB_PORT_FEAT_C_RESET:
            dev_dbg(imx21->dev, "    C_RESET\n");
            status_write = USBH_PORTSTAT_PRTRSTSC;
            break;
        default:
            dev_dbg(imx21->dev, "    unknown\n");
            rc = -EINVAL;
            break;
        }
        break;

    case GetHubDescriptor:
        dev_dbg(imx21->dev, "GetHubDescriptor\n");
        rc = get_hub_descriptor(hcd, (void *)buf);
        break;

    case GetHubStatus:
        dev_dbg(imx21->dev, "  GetHubStatus\n");
        *(__le32 *) buf = 0;
        break;

    case GetPortStatus:
        dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
            wIndex, USBH_PORTSTAT(wIndex - 1));
        *(__le32 *) buf = readl(imx21->regs +
            USBH_PORTSTAT(wIndex - 1));
        break;

    case SetHubFeature:
        dev_dbg(imx21->dev, "SetHubFeature\n");
        switch (wValue) {
        case C_HUB_OVER_CURRENT:
            dev_dbg(imx21->dev, "    OVER_CURRENT\n");
            break;
        case C_HUB_LOCAL_POWER:
            dev_dbg(imx21->dev, "    LOCAL_POWER\n");
            break;
        default:
            dev_dbg(imx21->dev, "    unknown\n");
            rc = -EINVAL;
            break;
        }
        break;

    case SetPortFeature:
        dev_dbg(imx21->dev, "SetPortFeature\n");
        switch (wValue) {
        case USB_PORT_FEAT_SUSPEND:
            dev_dbg(imx21->dev, "    SUSPEND\n");
            status_write = USBH_PORTSTAT_PRTSUSPST;
            break;
        case USB_PORT_FEAT_POWER:
            dev_dbg(imx21->dev, "    POWER\n");
            status_write = USBH_PORTSTAT_PRTPWRST;
            break;
        case USB_PORT_FEAT_RESET:
            dev_dbg(imx21->dev, "    RESET\n");
            status_write = USBH_PORTSTAT_PRTRSTST;
            break;
        default:
            dev_dbg(imx21->dev, "    unknown\n");
            rc = -EINVAL;
            break;
        }
        break;

    default:
        dev_dbg(imx21->dev, "  unknown\n");
        rc = -EINVAL;
        break;
    }

    if (status_write)
        writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
    return rc;
}

/* =========================================== */
/* Host controller management                  */
/* =========================================== */

static int imx21_hc_reset(struct usb_hcd *hcd)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    unsigned long timeout;
    unsigned long flags;

    spin_lock_irqsave(&imx21->lock, flags);

    /* Reset the Host controller modules */
    writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
        USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
        imx21->regs + USBOTG_RST_CTRL);

    /* Wait for reset to finish */
    timeout = jiffies + HZ;
    while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
        if (time_after(jiffies, timeout)) {
            spin_unlock_irqrestore(&imx21->lock, flags);
            dev_err(imx21->dev, "timeout waiting for reset\n");
            return -ETIMEDOUT;
        }
        spin_unlock_irq(&imx21->lock);
        schedule_timeout(1);
        spin_lock_irq(&imx21->lock);
    }
    spin_unlock_irqrestore(&imx21->lock, flags);
    return 0;
}
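
/*
 * Note: the reset wait loop above deliberately drops imx21->lock around
 * schedule_timeout() so that it can sleep while polling; the hardware
 * clears USBOTG_RST_CTRL back to zero once the requested resets complete.
 */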

static int __devinit imx21_hc_start(struct usb_hcd *hcd)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    unsigned long flags;
    int i, j;
    u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
    u32 usb_control = 0;

    hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
            USBOTG_HWMODE_HOSTXCVR_MASK);
    hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
            USBOTG_HWMODE_OTGXCVR_MASK);

    if (imx21->pdata->host1_txenoe)
        usb_control |= USBCTRL_HOST1_TXEN_OE;

    if (!imx21->pdata->host1_xcverless)
        usb_control |= USBCTRL_HOST1_BYP_TLL;

    if (imx21->pdata->otg_ext_xcvr)
        usb_control |= USBCTRL_OTC_RCV_RXDP;

    spin_lock_irqsave(&imx21->lock, flags);

    writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
        imx21->regs + USBOTG_CLK_CTRL);
    writel(hw_mode, imx21->regs + USBOTG_HWMODE);
    writel(usb_control, imx21->regs + USBCTRL);
    writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
        imx21->regs + USB_MISCCONTROL);

    /* Clear the ETDs */
    for (i = 0; i < USB_NUM_ETD; i++)
        for (j = 0; j < 4; j++)
            etd_writel(imx21, i, j, 0);

    /* Take the HC out of reset */
    writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
        imx21->regs + USBH_HOST_CTRL);

    /* Enable ports */
    if (imx21->pdata->enable_otg_host)
        writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
            imx21->regs + USBH_PORTSTAT(0));

    if (imx21->pdata->enable_host1)
        writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
            imx21->regs + USBH_PORTSTAT(1));

    if (imx21->pdata->enable_host2)
        writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
            imx21->regs + USBH_PORTSTAT(2));

    hcd->state = HC_STATE_RUNNING;

    /* Enable host controller interrupts */
    set_register_bits(imx21, USBH_SYSIEN,
        USBH_SYSIEN_HERRINT |
        USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
    set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);

    spin_unlock_irqrestore(&imx21->lock, flags);

    return 0;
}

static void imx21_hc_stop(struct usb_hcd *hcd)
{
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    unsigned long flags;

    spin_lock_irqsave(&imx21->lock, flags);

    writel(0, imx21->regs + USBH_SYSIEN);
    clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
    clear_register_bits(imx21, USBOTG_CLK_CTRL,
        USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
    spin_unlock_irqrestore(&imx21->lock, flags);
}

/* =========================================== */
/* Driver glue                                 */
/* =========================================== */

static struct hc_driver imx21_hc_driver = {
    .description = hcd_name,
    .product_desc = "IMX21 USB Host Controller",
    .hcd_priv_size = sizeof(struct imx21),

    .flags = HCD_USB11,
    .irq = imx21_irq,

    .reset = imx21_hc_reset,
    .start = imx21_hc_start,
    .stop = imx21_hc_stop,

    /* I/O requests */
    .urb_enqueue = imx21_hc_urb_enqueue,
    .urb_dequeue = imx21_hc_urb_dequeue,
    .endpoint_disable = imx21_hc_endpoint_disable,

    /* scheduling support */
    .get_frame_number = imx21_hc_get_frame,

    /* Root hub support */
    .hub_status_data = imx21_hc_hub_status_data,
    .hub_control = imx21_hc_hub_control,
};

static struct mx21_usbh_platform_data default_pdata = {
    .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
    .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
    .enable_host1 = 1,
    .enable_host2 = 1,
    .enable_otg_host = 1,
};

static int imx21_remove(struct platform_device *pdev)
{
    struct usb_hcd *hcd = platform_get_drvdata(pdev);
    struct imx21 *imx21 = hcd_to_imx21(hcd);
    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    remove_debug_files(imx21);
    usb_remove_hcd(hcd);

    if (res != NULL) {
        clk_disable(imx21->clk);
        clk_put(imx21->clk);
        iounmap(imx21->regs);
        release_mem_region(res->start, resource_size(res));
    }

    usb_put_hcd(hcd);
    return 0;
}

static int imx21_probe(struct platform_device *pdev)
{
    struct usb_hcd *hcd;
    struct imx21 *imx21;
    struct resource *res;
    int ret;
    int irq;

    printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -ENODEV;
    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return -ENXIO;

    hcd = usb_create_hcd(&imx21_hc_driver,
        &pdev->dev, dev_name(&pdev->dev));
    if (hcd == NULL) {
        dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
            dev_name(&pdev->dev));
        return -ENOMEM;
    }

    imx21 = hcd_to_imx21(hcd);
    imx21->dev = &pdev->dev;
    imx21->pdata = pdev->dev.platform_data;
    if (!imx21->pdata)
        imx21->pdata = &default_pdata;

    spin_lock_init(&imx21->lock);
    INIT_LIST_HEAD(&imx21->dmem_list);
    INIT_LIST_HEAD(&imx21->queue_for_etd);
    INIT_LIST_HEAD(&imx21->queue_for_dmem);
    create_debug_files(imx21);

    res = request_mem_region(res->start, resource_size(res), hcd_name);
    if (!res) {
        ret = -EBUSY;
        goto failed_request_mem;
    }

    imx21->regs = ioremap(res->start, resource_size(res));
    if (imx21->regs == NULL) {
        dev_err(imx21->dev, "Cannot map registers\n");
        ret = -ENOMEM;
        goto failed_ioremap;
    }

    /* Enable clocks source */
    imx21->clk = clk_get(imx21->dev, NULL);
    if (IS_ERR(imx21->clk)) {
        dev_err(imx21->dev, "no clock found\n");
        ret = PTR_ERR(imx21->clk);
        goto failed_clock_get;
    }

    ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
    if (ret)
        goto failed_clock_set;
    ret = clk_enable(imx21->clk);
    if (ret)
        goto failed_clock_enable;

    dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
        (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);

    ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
    if (ret != 0) {
        dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
        goto failed_add_hcd;
    }

    return 0;

failed_add_hcd:
    clk_disable(imx21->clk);
failed_clock_enable:
failed_clock_set:
    clk_put(imx21->clk);
failed_clock_get:
    iounmap(imx21->regs);
failed_ioremap:
    release_mem_region(res->start, resource_size(res));
failed_request_mem:
    remove_debug_files(imx21);
    usb_put_hcd(hcd);
    return ret;
}

static struct platform_driver imx21_hcd_driver = {
    .driver = {
        .name = (char *)hcd_name,
    },
    .probe = imx21_probe,
    .remove = imx21_remove,
    .suspend = NULL,
    .resume = NULL,
};

static int __init imx21_hcd_init(void)
{
    return platform_driver_register(&imx21_hcd_driver);
}

static void __exit imx21_hcd_cleanup(void)
{
    platform_driver_unregister(&imx21_hcd_driver);
}

module_init(imx21_hcd_init);
module_exit(imx21_hcd_cleanup);

MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");