2 * ISP1362 HCD (Host Controller Driver) for USB.
4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
6 * Derived from the SL811 HCD, rewritten for ISP116x.
7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11 * Copyright (C) 2004 David Brownell
15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
16 * accesses to the address and data register.
17 * The following timing options exist:
19 * 1. Configure your memory controller to add such delays if it can (the best)
20 * 2. Implement platform-specific delay function possibly
21 * combined with configuring the memory controller; see
22 * include/linux/usb_isp1362.h for more info.
23 * 3. Use ndelay (easiest, poorest).
25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26 * platform specific section of isp1362.h to select the appropriate variant.
28 * Also note that according to the Philips "ISP1362 Errata" document
29 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
30 * is reasserted (even with #CS deasserted) within 132ns after a
31 * write cycle to any controller register. If the hardware doesn't
32 * implement the recommended fix (gating the #WR with #CS) software
33 * must ensure that no further write cycle (not necessarily to the chip!)
34 * is issued by the CPU within this interval.
36 * For PXA25x this can be ensured by using VLIO with the maximum
37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
40 #ifdef CONFIG_USB_DEBUG
41 # define ISP1362_DEBUG
47 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49 * requests are carried out in separate frames. This will delay any SETUP
50 * packets until the start of the next frame so that this situation is
51 * unlikely to occur (and makes usbtest happy running with a PXA255 target
54 #undef BUGGY_PXA2XX_UDC_USBTEST
61 /* This enables a memory test on the ISP1362 chip memory to make sure the
62 * chip access timing is correct.
64 #undef CHIP_BUFFER_TEST
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/kernel.h>
69 #include <linux/delay.h>
70 #include <linux/ioport.h>
71 #include <linux/sched.h>
72 #include <linux/slab.h>
73 #include <linux/smp_lock.h>
74 #include <linux/errno.h>
75 #include <linux/init.h>
76 #include <linux/list.h>
77 #include <linux/interrupt.h>
78 #include <linux/usb.h>
79 #include <linux/usb/isp1362.h>
80 #include <linux/platform_device.h>
83 #include <linux/bitops.h>
86 #include <asm/system.h>
87 #include <asm/byteorder.h>
88 #include <asm/unaligned.h>
92 module_param(dbg_level
, int, 0644);
94 module_param(dbg_level
, int, 0);
95 #define STUB_DEBUG_FILE
98 #include "../core/hcd.h"
99 #include "../core/usb.h"
103 #define DRIVER_VERSION "2005-04-04"
104 #define DRIVER_DESC "ISP1362 USB Host Controller Driver"
106 MODULE_DESCRIPTION(DRIVER_DESC
);
107 MODULE_LICENSE("GPL");
109 static const char hcd_name
[] = "isp1362-hcd";
111 static void isp1362_hc_stop(struct usb_hcd
*hcd
);
112 static int isp1362_hc_start(struct usb_hcd
*hcd
);
114 /*-------------------------------------------------------------------------*/
117 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
118 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
120 * We don't need a 'disable' counterpart, since interrupts will be disabled
121 * only by the interrupt handler.
/*
 * Enable the HCuPINT interrupt sources given in 'mask'.
 * Updates the driver's soft copy of the enable mask (isp1362_hcd->irqenb)
 * and writes it to the HCuPINTENB register -- but, per the comment block
 * above this function, the hardware register is left alone while the
 * interrupt handler is running (irq_active), since the handler itself
 * writes irqenb to the chip.
 */
123 static inline void isp1362_enable_int(struct isp1362_hcd
*isp1362_hcd
, u16 mask
)
/* all requested bits already enabled -- nothing to do */
125 if ((isp1362_hcd
->irqenb
| mask
) == isp1362_hcd
->irqenb
)
/* acknowledge stale pending status for bits being newly enabled
 * (NOTE(review): assumes HCuPINT is write-1-to-clear -- confirm with the
 * ISP1362 datasheet) */
127 if (mask
& ~isp1362_hcd
->irqenb
)
128 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, mask
& ~isp1362_hcd
->irqenb
);
129 isp1362_hcd
->irqenb
|= mask
;
/* NOTE(review): the line between this test and the register write was lost
 * in extraction; upstream returns early here when irq_active is set, so the
 * HCuPINTENB write below only happens outside the IRQ handler */
130 if (isp1362_hcd
->irq_active
)
132 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
135 /*-------------------------------------------------------------------------*/
/*
 * Map a PTD buffer offset inside the chip's internal memory back to the
 * endpoint queue (ISTL0, ISTL1, INTL or ATL) that owns it, by comparing
 * the offset against each queue's buffer start address.  The queue regions
 * are laid out in that order, so a chain of '<' tests suffices.
 * Returns NULL (and warns) for an offset outside every region.
 */
137 static inline struct isp1362_ep_queue
*get_ptd_queue(struct isp1362_hcd
*isp1362_hcd
,
140 struct isp1362_ep_queue
*epq
= NULL
;
/* below istl_queue[1] start => belongs to istl_queue[0] */
142 if (offset
< isp1362_hcd
->istl_queue
[1].buf_start
)
143 epq
= &isp1362_hcd
->istl_queue
[0];
144 else if (offset
< isp1362_hcd
->intl_queue
.buf_start
)
145 epq
= &isp1362_hcd
->istl_queue
[1];
146 else if (offset
< isp1362_hcd
->atl_queue
.buf_start
)
147 epq
= &isp1362_hcd
->intl_queue
;
/* inside the ATL region proper (start .. start + size) */
148 else if (offset
< isp1362_hcd
->atl_queue
.buf_start
+
149 isp1362_hcd
->atl_queue
.buf_size
)
150 epq
= &isp1362_hcd
->atl_queue
;
153 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__
, offset
, epq
->name
);
/* offset did not fall into any queue's buffer region */
155 pr_warning("%s: invalid PTD $%04x\n", __func__
, offset
);
/*
 * Compute the byte offset of PTD slot 'index' within queue 'epq':
 * buf_start + index * blk_size.  An index beyond the queue's buffer is
 * reported (the warning prints the number of slots, buf_size / blk_size);
 * the error-return line itself was elided by extraction.
 */
160 static inline int get_ptd_offset(struct isp1362_ep_queue
*epq
, u8 index
)
164 if (index
* epq
->blk_size
> epq
->buf_size
) {
165 pr_warning("%s: Bad %s index %d(%d)\n", __func__
, epq
->name
, index
,
166 epq
->buf_size
/ epq
->blk_size
);
/* slot index -> byte offset inside the chip buffer */
169 offset
= epq
->buf_start
+ index
* epq
->blk_size
;
170 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__
, epq
->name
, index
, offset
);
175 /*-------------------------------------------------------------------------*/
/*
 * Compute the largest transfer size usable for one PTD on queue 'epq':
 * clamp the requested 'size' to MAX_XFER_SIZE, then to the space left in
 * the queue (free slots * slot size, minus the PTD header), and finally
 * round a partial transfer down to a multiple of the endpoint's max
 * packet size 'mps' so the chip never sends/receives a short mid-packet.
 */
177 static inline u16
max_transfer_size(struct isp1362_ep_queue
*epq
, size_t size
,
180 u16 xfer_size
= min_t(size_t, MAX_XFER_SIZE
, size
);
/* limit to the free buffer space of the queue */
182 xfer_size
= min_t(size_t, xfer_size
, epq
->buf_avail
* epq
->blk_size
- PTD_HEADER_SIZE
);
/* partial transfer: trim to a whole number of max-size packets */
183 if (xfer_size
< size
&& xfer_size
% mps
)
184 xfer_size
-= xfer_size
% mps
;
189 static int claim_ptd_buffers(struct isp1362_ep_queue
*epq
,
190 struct isp1362_ep
*ep
, u16 len
)
192 int ptd_offset
= -EINVAL
;
194 int num_ptds
= ((len
+ PTD_HEADER_SIZE
- 1) / epq
->blk_size
) + 1;
198 BUG_ON(len
> epq
->buf_size
);
204 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__
,
205 epq
->name
, len
, epq
->blk_size
, num_ptds
, epq
->buf_map
, epq
->skip_map
);
206 BUG_ON(ep
->num_ptds
!= 0);
208 for (index
= 0; index
<= epq
->buf_count
- num_ptds
; index
++) {
209 if (test_bit(index
, &epq
->buf_map
))
212 for (last
= index
+ 1; last
< index
+ num_ptds
; last
++) {
213 if (test_bit(last
, &epq
->buf_map
)) {
224 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__
,
225 num_ptds
, found
, len
, (int)(epq
->blk_size
- PTD_HEADER_SIZE
));
226 ptd_offset
= get_ptd_offset(epq
, found
);
227 WARN_ON(ptd_offset
< 0);
228 ep
->ptd_offset
= ptd_offset
;
229 ep
->num_ptds
+= num_ptds
;
230 epq
->buf_avail
-= num_ptds
;
231 BUG_ON(epq
->buf_avail
> epq
->buf_count
);
232 ep
->ptd_index
= found
;
233 for (index
= found
; index
< last
; index
++)
234 __set_bit(index
, &epq
->buf_map
);
235 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
236 __func__
, epq
->name
, ep
->ptd_index
, ep
->ptd_offset
,
237 epq
->buf_avail
, epq
->buf_count
, num_ptds
, epq
->buf_map
, epq
->skip_map
);
/*
 * Return the PTD buffer slots claimed by 'ep' back to queue 'epq':
 * clear the slots' bits in buf_map (slot free again), set them in
 * skip_map (chip must not process them), credit buf_avail, and
 * invalidate the endpoint's slot bookkeeping (ptd_offset/ptd_index).
 * Counterpart of claim_ptd_buffers().
 */
242 static inline void release_ptd_buffers(struct isp1362_ep_queue
*epq
, struct isp1362_ep
*ep
)
244 int index
= ep
->ptd_index
;
/* one past the last slot owned by this endpoint */
245 int last
= ep
->ptd_index
+ ep
->num_ptds
;
/* corrupted bookkeeping: dump full state before the BUG_ON below fires */
247 if (last
> epq
->buf_count
)
248 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
249 __func__
, ep
, ep
->num_req
, ep
->length
, epq
->name
, ep
->ptd_index
,
250 ep
->ptd_offset
, ep
->num_ptds
, epq
->buf_count
, epq
->buf_avail
,
251 epq
->buf_map
, epq
->skip_map
);
252 BUG_ON(last
> epq
->buf_count
);
/* free each slot and mark it to be skipped by the chip */
254 for (; index
< last
; index
++) {
255 __clear_bit(index
, &epq
->buf_map
);
256 __set_bit(index
, &epq
->skip_map
);
258 epq
->buf_avail
+= ep
->num_ptds
;
/* sanity: never more slots free (or counted) than exist */
261 BUG_ON(epq
->buf_avail
> epq
->buf_count
);
262 BUG_ON(epq
->ptd_count
> epq
->buf_count
);
264 DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
266 ep
->ptd_offset
, ep
->num_ptds
, epq
->buf_avail
, epq
->buf_count
);
267 DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__
,
268 epq
->buf_map
, epq
->skip_map
);
/* endpoint no longer owns any chip buffer */
271 ep
->ptd_offset
= -EINVAL
;
272 ep
->ptd_index
= -EINVAL
;
275 /*-------------------------------------------------------------------------*/
280 static void prepare_ptd(struct isp1362_hcd
*isp1362_hcd
, struct urb
*urb
,
281 struct isp1362_ep
*ep
, struct isp1362_ep_queue
*epq
,
288 size_t buf_len
= urb
->transfer_buffer_length
- urb
->actual_length
;
290 DBG(3, "%s: %s ep %p\n", __func__
, epq
->name
, ep
);
294 ep
->data
= (unsigned char *)urb
->transfer_buffer
+ urb
->actual_length
;
296 switch (ep
->nextpid
) {
298 toggle
= usb_gettoggle(urb
->dev
, ep
->epnum
, 0);
300 if (usb_pipecontrol(urb
->pipe
)) {
301 len
= min_t(size_t, ep
->maxpacket
, buf_len
);
302 } else if (usb_pipeisoc(urb
->pipe
)) {
303 len
= min_t(size_t, urb
->iso_frame_desc
[fno
].length
, MAX_XFER_SIZE
);
304 ep
->data
= urb
->transfer_buffer
+ urb
->iso_frame_desc
[fno
].offset
;
306 len
= max_transfer_size(epq
, buf_len
, ep
->maxpacket
);
307 DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__
, len
, ep
->maxpacket
,
311 toggle
= usb_gettoggle(urb
->dev
, ep
->epnum
, 1);
313 if (usb_pipecontrol(urb
->pipe
))
314 len
= min_t(size_t, ep
->maxpacket
, buf_len
);
315 else if (usb_pipeisoc(urb
->pipe
))
316 len
= min_t(size_t, urb
->iso_frame_desc
[0].length
, MAX_XFER_SIZE
);
318 len
= max_transfer_size(epq
, buf_len
, ep
->maxpacket
);
320 pr_info("%s: Sending ZERO packet: %d\n", __func__
,
321 urb
->transfer_flags
& URB_ZERO_PACKET
);
322 DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__
, len
, ep
->maxpacket
,
328 len
= sizeof(struct usb_ctrlrequest
);
329 DBG(1, "%s: SETUP len %d\n", __func__
, len
);
330 ep
->data
= urb
->setup_packet
;
335 dir
= (urb
->transfer_buffer_length
&& usb_pipein(urb
->pipe
)) ?
336 PTD_DIR_OUT
: PTD_DIR_IN
;
337 DBG(1, "%s: ACK len %d\n", __func__
, len
);
340 toggle
= dir
= len
= 0;
341 pr_err("%s@%d: ep->nextpid %02x\n", __func__
, __LINE__
, ep
->nextpid
);
349 ptd
->count
= PTD_CC_MSK
| PTD_ACTIVE_MSK
| PTD_TOGGLE(toggle
);
350 ptd
->mps
= PTD_MPS(ep
->maxpacket
) | PTD_SPD(urb
->dev
->speed
== USB_SPEED_LOW
) |
352 ptd
->len
= PTD_LEN(len
) | PTD_DIR(dir
);
353 ptd
->faddr
= PTD_FA(usb_pipedevice(urb
->pipe
));
355 if (usb_pipeint(urb
->pipe
)) {
356 ptd
->faddr
|= PTD_SF_INT(ep
->branch
);
357 ptd
->faddr
|= PTD_PR(ep
->interval
? __ffs(ep
->interval
) : 0);
359 if (usb_pipeisoc(urb
->pipe
))
360 ptd
->faddr
|= PTD_SF_ISO(fno
);
362 DBG(1, "%s: Finished\n", __func__
);
/*
 * Copy the endpoint's prepared PTD header -- and, for OUT transfers, the
 * payload -- into the chip's buffer memory at ep->ptd_offset.  For IN
 * transfers only the header is written (len forced to 0), since the data
 * will be produced by the device.
 */
365 static void isp1362_write_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
366 struct isp1362_ep_queue
*epq
)
368 struct ptd
*ptd
= &ep
->ptd
;
/* IN: no payload to send; OUT/SETUP: write ep->length bytes */
369 int len
= PTD_GET_DIR(ptd
) == PTD_DIR_IN
? 0 : ep
->length
;
371 _BUG_ON(ep
->ptd_offset
< 0);
/* PTD header first ... */
374 isp1362_write_buffer(isp1362_hcd
, ptd
, ep
->ptd_offset
, PTD_HEADER_SIZE
);
/* ... then the payload immediately after it */
376 isp1362_write_buffer(isp1362_hcd
, ep
->data
,
377 ep
->ptd_offset
+ PTD_HEADER_SIZE
, len
);
380 dump_ptd_out_data(ptd
, ep
->data
);
/*
 * Read back a completed PTD from chip buffer memory: remove the endpoint
 * from the queue's active list, fetch the PTD header, and -- for IN
 * transfers that actually received data -- copy exactly the received byte
 * count (PTD_GET_COUNT) into ep->data, never more.
 */
383 static void isp1362_read_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
384 struct isp1362_ep_queue
*epq
)
386 struct ptd
*ptd
= &ep
->ptd
;
/* a PTD being read back must have been on the active list */
389 WARN_ON(list_empty(&ep
->active
));
390 BUG_ON(ep
->ptd_offset
< 0);
392 list_del_init(&ep
->active
);
393 DBG(1, "%s: ep %p removed from active list %p\n", __func__
, ep
, &epq
->active
);
/* fetch the updated PTD header from the chip */
396 isp1362_read_buffer(isp1362_hcd
, ptd
, ep
->ptd_offset
, PTD_HEADER_SIZE
);
398 act_len
= PTD_GET_COUNT(ptd
);
/* only IN transfers with a non-zero actual length carry payload to copy
 * (NOTE(review): the early-return line after this test was elided) */
399 if (PTD_GET_DIR(ptd
) != PTD_DIR_IN
|| act_len
== 0)
/* chip reported more data than was requested: log, then trap */
401 if (act_len
> ep
->length
)
402 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__
, ep
,
403 ep
->ptd_offset
, act_len
, ep
->length
);
404 BUG_ON(act_len
> ep
->length
);
405 /* Only transfer the amount of data that has actually been overwritten
406 * in the chip buffer. We don't want any data that doesn't belong to the
407 * transfer to leak out of the chip to the callers transfer buffer!
410 isp1362_read_buffer(isp1362_hcd
, ep
->data
,
411 ep
->ptd_offset
+ PTD_HEADER_SIZE
, act_len
);
412 dump_ptd_in_data(ptd
, ep
->data
);
416 * INT PTDs will stay in the chip until data is available.
417 * This function will remove a PTD from the chip when the URB is dequeued.
418 * Must be called with the spinlock held and IRQs disabled
420 static void remove_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
)
424 struct isp1362_ep_queue
*epq
;
426 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__
, ep
, ep
->ptd_index
, ep
->ptd_offset
);
427 BUG_ON(ep
->ptd_offset
< 0);
429 epq
= get_ptd_queue(isp1362_hcd
, ep
->ptd_offset
);
432 /* put ep in remove_list for cleanup */
433 WARN_ON(!list_empty(&ep
->remove_list
));
434 list_add_tail(&ep
->remove_list
, &isp1362_hcd
->remove_list
);
435 /* let SOF interrupt handle the cleanup */
436 isp1362_enable_int(isp1362_hcd
, HCuPINT_SOF
);
438 index
= ep
->ptd_index
;
440 /* ISO queues don't have SKIP registers */
443 DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__
,
444 index
, ep
->ptd_offset
, epq
->skip_map
, 1 << index
);
446 /* prevent further processing of PTD (will be effective after next SOF) */
447 epq
->skip_map
|= 1 << index
;
448 if (epq
== &isp1362_hcd
->atl_queue
) {
449 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__
,
450 isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
), epq
->skip_map
);
451 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, epq
->skip_map
);
452 if (~epq
->skip_map
== 0)
453 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
454 } else if (epq
== &isp1362_hcd
->intl_queue
) {
455 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__
,
456 isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
), epq
->skip_map
);
457 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, epq
->skip_map
);
458 if (~epq
->skip_map
== 0)
459 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
464 Take done or failed requests out of schedule. Give back
467 static void finish_request(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
468 struct urb
*urb
, int status
)
469 __releases(isp1362_hcd
->lock
)
470 __acquires(isp1362_hcd
->lock
)
475 if (usb_pipecontrol(urb
->pipe
))
476 ep
->nextpid
= USB_PID_SETUP
;
478 URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__
,
479 ep
->num_req
, usb_pipedevice(urb
->pipe
),
480 usb_pipeendpoint(urb
->pipe
),
481 !usb_pipein(urb
->pipe
) ? "out" : "in",
482 usb_pipecontrol(urb
->pipe
) ? "ctrl" :
483 usb_pipeint(urb
->pipe
) ? "int" :
484 usb_pipebulk(urb
->pipe
) ? "bulk" :
486 urb
->actual_length
, urb
->transfer_buffer_length
,
487 !(urb
->transfer_flags
& URB_SHORT_NOT_OK
) ?
488 "short_ok" : "", urb
->status
);
491 usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd
), urb
);
492 spin_unlock(&isp1362_hcd
->lock
);
493 usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd
), urb
, status
);
494 spin_lock(&isp1362_hcd
->lock
);
496 /* take idle endpoints out of the schedule right away */
497 if (!list_empty(&ep
->hep
->urb_list
))
500 /* async deschedule */
501 if (!list_empty(&ep
->schedule
)) {
502 list_del_init(&ep
->schedule
);
508 /* periodic deschedule */
509 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep
->interval
,
510 ep
, ep
->branch
, ep
->load
,
511 isp1362_hcd
->load
[ep
->branch
],
512 isp1362_hcd
->load
[ep
->branch
] - ep
->load
);
513 isp1362_hcd
->load
[ep
->branch
] -= ep
->load
;
514 ep
->branch
= PERIODIC_SIZE
;
519 * Analyze transfer results, handle partial transfers and errors
521 static void postproc_ep(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
)
523 struct urb
*urb
= get_urb(ep
);
524 struct usb_device
*udev
;
528 int urbstat
= -EINPROGRESS
;
531 DBG(2, "%s: ep %p req %d\n", __func__
, ep
, ep
->num_req
);
535 cc
= PTD_GET_CC(ptd
);
536 if (cc
== PTD_NOTACCESSED
) {
537 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__
,
542 short_ok
= !(urb
->transfer_flags
& URB_SHORT_NOT_OK
);
543 len
= urb
->transfer_buffer_length
- urb
->actual_length
;
545 /* Data underrun is special. For allowed underrun
546 we clear the error and continue as normal. For
547 forbidden underrun we finish the DATA stage
548 immediately while for control transfer,
549 we do a STATUS stage.
551 if (cc
== PTD_DATAUNDERRUN
) {
553 DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
554 __func__
, ep
->num_req
, short_ok
? "" : "not_",
555 PTD_GET_COUNT(ptd
), ep
->maxpacket
, len
);
559 DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
560 __func__
, ep
->num_req
,
561 usb_pipein(urb
->pipe
) ? "IN" : "OUT", ep
->nextpid
,
562 short_ok
? "" : "not_",
563 PTD_GET_COUNT(ptd
), ep
->maxpacket
, len
);
564 if (usb_pipecontrol(urb
->pipe
)) {
565 ep
->nextpid
= USB_PID_ACK
;
566 /* save the data underrun error code for later and
567 * procede with the status stage
569 urb
->actual_length
+= PTD_GET_COUNT(ptd
);
570 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
572 if (urb
->status
== -EINPROGRESS
)
573 urb
->status
= cc_to_error
[PTD_DATAUNDERRUN
];
575 usb_settoggle(udev
, ep
->epnum
, ep
->nextpid
== USB_PID_OUT
,
576 PTD_GET_TOGGLE(ptd
));
577 urbstat
= cc_to_error
[PTD_DATAUNDERRUN
];
583 if (cc
!= PTD_CC_NOERROR
) {
584 if (++ep
->error_count
>= 3 || cc
== PTD_CC_STALL
|| cc
== PTD_DATAOVERRUN
) {
585 urbstat
= cc_to_error
[cc
];
586 DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
587 __func__
, ep
->num_req
, ep
->nextpid
, urbstat
, cc
,
593 switch (ep
->nextpid
) {
595 if (PTD_GET_COUNT(ptd
) != ep
->length
)
596 pr_err("%s: count=%d len=%d\n", __func__
,
597 PTD_GET_COUNT(ptd
), ep
->length
);
598 BUG_ON(PTD_GET_COUNT(ptd
) != ep
->length
);
599 urb
->actual_length
+= ep
->length
;
600 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
601 usb_settoggle(udev
, ep
->epnum
, 1, PTD_GET_TOGGLE(ptd
));
602 if (urb
->actual_length
== urb
->transfer_buffer_length
) {
603 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__
,
604 ep
->num_req
, len
, ep
->maxpacket
, urbstat
);
605 if (usb_pipecontrol(urb
->pipe
)) {
606 DBG(3, "%s: req %d %s Wait for ACK\n", __func__
,
608 usb_pipein(urb
->pipe
) ? "IN" : "OUT");
609 ep
->nextpid
= USB_PID_ACK
;
611 if (len
% ep
->maxpacket
||
612 !(urb
->transfer_flags
& URB_ZERO_PACKET
)) {
614 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
615 __func__
, ep
->num_req
, usb_pipein(urb
->pipe
) ? "IN" : "OUT",
616 urbstat
, len
, ep
->maxpacket
, urb
->actual_length
);
622 len
= PTD_GET_COUNT(ptd
);
623 BUG_ON(len
> ep
->length
);
624 urb
->actual_length
+= len
;
625 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
626 usb_settoggle(udev
, ep
->epnum
, 0, PTD_GET_TOGGLE(ptd
));
627 /* if transfer completed or (allowed) data underrun */
628 if ((urb
->transfer_buffer_length
== urb
->actual_length
) ||
629 len
% ep
->maxpacket
) {
630 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__
,
631 ep
->num_req
, len
, ep
->maxpacket
, urbstat
);
632 if (usb_pipecontrol(urb
->pipe
)) {
633 DBG(3, "%s: req %d %s Wait for ACK\n", __func__
,
635 usb_pipein(urb
->pipe
) ? "IN" : "OUT");
636 ep
->nextpid
= USB_PID_ACK
;
639 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
640 __func__
, ep
->num_req
, usb_pipein(urb
->pipe
) ? "IN" : "OUT",
641 urbstat
, len
, ep
->maxpacket
, urb
->actual_length
);
646 if (urb
->transfer_buffer_length
== urb
->actual_length
) {
647 ep
->nextpid
= USB_PID_ACK
;
648 } else if (usb_pipeout(urb
->pipe
)) {
649 usb_settoggle(udev
, 0, 1, 1);
650 ep
->nextpid
= USB_PID_OUT
;
652 usb_settoggle(udev
, 0, 0, 1);
653 ep
->nextpid
= USB_PID_IN
;
657 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__
, ep
->num_req
,
659 WARN_ON(urbstat
!= -EINPROGRESS
);
668 if (urbstat
!= -EINPROGRESS
) {
669 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__
,
670 ep
, ep
->num_req
, urb
, urbstat
);
671 finish_request(isp1362_hcd
, ep
, urb
, urbstat
);
675 static void finish_unlinks(struct isp1362_hcd
*isp1362_hcd
)
677 struct isp1362_ep
*ep
;
678 struct isp1362_ep
*tmp
;
680 list_for_each_entry_safe(ep
, tmp
, &isp1362_hcd
->remove_list
, remove_list
) {
681 struct isp1362_ep_queue
*epq
=
682 get_ptd_queue(isp1362_hcd
, ep
->ptd_offset
);
683 int index
= ep
->ptd_index
;
687 DBG(1, "%s: remove PTD[%d] $%04x\n", __func__
, index
, ep
->ptd_offset
);
688 BUG_ON(ep
->num_ptds
== 0);
689 release_ptd_buffers(epq
, ep
);
691 if (!list_empty(&ep
->hep
->urb_list
)) {
692 struct urb
*urb
= get_urb(ep
);
694 DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__
,
696 finish_request(isp1362_hcd
, ep
, urb
, -ESHUTDOWN
);
698 WARN_ON(list_empty(&ep
->active
));
699 if (!list_empty(&ep
->active
)) {
700 list_del_init(&ep
->active
);
701 DBG(1, "%s: ep %p removed from active list\n", __func__
, ep
);
703 list_del_init(&ep
->remove_list
);
704 DBG(1, "%s: ep %p removed from remove_list\n", __func__
, ep
);
706 DBG(1, "%s: Done\n", __func__
);
709 static inline void enable_atl_transfers(struct isp1362_hcd
*isp1362_hcd
, int count
)
712 if (count
< isp1362_hcd
->atl_queue
.ptd_count
)
713 isp1362_write_reg16(isp1362_hcd
, HCATLDTC
, count
);
714 isp1362_enable_int(isp1362_hcd
, HCuPINT_ATL
);
715 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, isp1362_hcd
->atl_queue
.skip_map
);
716 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
718 isp1362_enable_int(isp1362_hcd
, HCuPINT_SOF
);
721 static inline void enable_intl_transfers(struct isp1362_hcd
*isp1362_hcd
)
723 isp1362_enable_int(isp1362_hcd
, HCuPINT_INTL
);
724 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
725 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, isp1362_hcd
->intl_queue
.skip_map
);
728 static inline void enable_istl_transfers(struct isp1362_hcd
*isp1362_hcd
, int flip
)
730 isp1362_enable_int(isp1362_hcd
, flip
? HCuPINT_ISTL1
: HCuPINT_ISTL0
);
731 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, flip
?
732 HCBUFSTAT_ISTL1_FULL
: HCBUFSTAT_ISTL0_FULL
);
/*
 * Submit one request for 'ep' on queue 'epq': build the PTD from the URB,
 * claim buffer slots (propagating -ENOMEM when no slot run is free and
 * -EOVERFLOW when the transfer does not fit), put the endpoint on the
 * queue's active list, copy the PTD into chip memory and clear its
 * skip-map bit so the controller will process it.
 * (NOTE(review): the error-return and success-return lines were elided by
 * extraction; the claim_ptd_buffers() result is what is propagated.)
 */
735 static int submit_req(struct isp1362_hcd
*isp1362_hcd
, struct urb
*urb
,
736 struct isp1362_ep
*ep
, struct isp1362_ep_queue
*epq
)
738 int index
= epq
->free_ptd
;
/* fill in ep->ptd (and ep->data / ep->length) from the URB */
740 prepare_ptd(isp1362_hcd
, urb
, ep
, epq
, 0);
741 index
= claim_ptd_buffers(epq
, ep
, ep
->length
);
/* no contiguous run of free slots right now -- caller retries later */
742 if (index
== -ENOMEM
) {
743 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__
,
744 ep
->num_req
, epq
->name
, ep
->num_ptds
, epq
->buf_map
, epq
->skip_map
);
/* transfer larger than the queue's remaining buffer space */
746 } else if (index
== -EOVERFLOW
) {
747 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
748 __func__
, ep
->num_req
, ep
->length
, epq
->name
, ep
->num_ptds
,
749 epq
->buf_map
, epq
->skip_map
);
/* success path: queue as active, hand PTD to the chip */
753 list_add_tail(&ep
->active
, &epq
->active
);
754 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__
,
755 ep
, ep
->num_req
, ep
->length
, &epq
->active
);
756 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__
, epq
->name
,
757 ep
->ptd_offset
, ep
, ep
->num_req
);
758 isp1362_write_ptd(isp1362_hcd
, ep
, epq
);
/* un-skip the slot so the controller processes it on the next frame */
759 __clear_bit(ep
->ptd_index
, &epq
->skip_map
);
764 static void start_atl_transfers(struct isp1362_hcd
*isp1362_hcd
)
767 struct isp1362_ep_queue
*epq
= &isp1362_hcd
->atl_queue
;
768 struct isp1362_ep
*ep
;
771 if (atomic_read(&epq
->finishing
)) {
772 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
776 list_for_each_entry(ep
, &isp1362_hcd
->async
, schedule
) {
777 struct urb
*urb
= get_urb(ep
);
780 if (!list_empty(&ep
->active
)) {
781 DBG(2, "%s: Skipping active %s ep %p\n", __func__
, epq
->name
, ep
);
785 DBG(1, "%s: Processing %s ep %p req %d\n", __func__
, epq
->name
,
788 ret
= submit_req(isp1362_hcd
, urb
, ep
, epq
);
789 if (ret
== -ENOMEM
) {
792 } else if (ret
== -EOVERFLOW
) {
796 #ifdef BUGGY_PXA2XX_UDC_USBTEST
797 defer
= ep
->nextpid
== USB_PID_SETUP
;
802 /* Avoid starving of endpoints */
803 if (isp1362_hcd
->async
.next
!= isp1362_hcd
->async
.prev
) {
804 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__
, ptd_count
);
805 list_move(&isp1362_hcd
->async
, isp1362_hcd
->async
.next
);
807 if (ptd_count
|| defer
)
808 enable_atl_transfers(isp1362_hcd
, defer
? 0 : ptd_count
);
810 epq
->ptd_count
+= ptd_count
;
811 if (epq
->ptd_count
> epq
->stat_maxptds
) {
812 epq
->stat_maxptds
= epq
->ptd_count
;
813 DBG(0, "%s: max_ptds: %d\n", __func__
, epq
->stat_maxptds
);
817 static void start_intl_transfers(struct isp1362_hcd
*isp1362_hcd
)
820 struct isp1362_ep_queue
*epq
= &isp1362_hcd
->intl_queue
;
821 struct isp1362_ep
*ep
;
823 if (atomic_read(&epq
->finishing
)) {
824 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
828 list_for_each_entry(ep
, &isp1362_hcd
->periodic
, schedule
) {
829 struct urb
*urb
= get_urb(ep
);
832 if (!list_empty(&ep
->active
)) {
833 DBG(1, "%s: Skipping active %s ep %p\n", __func__
,
838 DBG(1, "%s: Processing %s ep %p req %d\n", __func__
,
839 epq
->name
, ep
, ep
->num_req
);
840 ret
= submit_req(isp1362_hcd
, urb
, ep
, epq
);
843 else if (ret
== -EOVERFLOW
)
849 static int last_count
;
851 if (ptd_count
!= last_count
) {
852 DBG(0, "%s: ptd_count: %d\n", __func__
, ptd_count
);
853 last_count
= ptd_count
;
855 enable_intl_transfers(isp1362_hcd
);
858 epq
->ptd_count
+= ptd_count
;
859 if (epq
->ptd_count
> epq
->stat_maxptds
)
860 epq
->stat_maxptds
= epq
->ptd_count
;
/*
 * Advance past the PTD slots occupied by 'ep' in queue 'epq' and yield the
 * byte offset where the next PTD may start.  num_ptds is the slot count
 * needed for header + payload, rounded up to whole blk_size slots.
 * (NOTE(review): the return statements were elided by extraction; the
 * bounds test below suggests the advanced offset is returned when it still
 * fits in the buffer, and an error value otherwise -- confirm upstream.)
 */
863 static inline int next_ptd(struct isp1362_ep_queue
*epq
, struct isp1362_ep
*ep
)
865 u16 ptd_offset
= ep
->ptd_offset
;
/* slots consumed: ceil((length + header) / blk_size) */
866 int num_ptds
= (ep
->length
+ PTD_HEADER_SIZE
+ (epq
->blk_size
- 1)) / epq
->blk_size
;
868 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__
, ptd_offset
,
869 ep
->length
, num_ptds
, epq
->blk_size
, ptd_offset
+ num_ptds
* epq
->blk_size
);
871 ptd_offset
+= num_ptds
* epq
->blk_size
;
/* still within this queue's buffer region? */
872 if (ptd_offset
< epq
->buf_start
+ epq
->buf_size
)
878 static void start_iso_transfers(struct isp1362_hcd
*isp1362_hcd
)
881 int flip
= isp1362_hcd
->istl_flip
;
882 struct isp1362_ep_queue
*epq
;
884 struct isp1362_ep
*ep
;
885 struct isp1362_ep
*tmp
;
886 u16 fno
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
889 epq
= &isp1362_hcd
->istl_queue
[flip
];
890 if (atomic_read(&epq
->finishing
)) {
891 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
895 if (!list_empty(&epq
->active
))
898 ptd_offset
= epq
->buf_start
;
899 list_for_each_entry_safe(ep
, tmp
, &isp1362_hcd
->isoc
, schedule
) {
900 struct urb
*urb
= get_urb(ep
);
901 s16 diff
= fno
- (u16
)urb
->start_frame
;
903 DBG(1, "%s: Processing %s ep %p\n", __func__
, epq
->name
, ep
);
905 if (diff
> urb
->number_of_packets
) {
906 /* time frame for this URB has elapsed */
907 finish_request(isp1362_hcd
, ep
, urb
, -EOVERFLOW
);
909 } else if (diff
< -1) {
910 /* URB is not due in this frame or the next one.
911 * Comparing with '-1' instead of '0' accounts for double
912 * buffering in the ISP1362 which enables us to queue the PTD
913 * one frame ahead of time
915 } else if (diff
== -1) {
916 /* submit PTD's that are due in the next frame */
917 prepare_ptd(isp1362_hcd
, urb
, ep
, epq
, fno
);
918 if (ptd_offset
+ PTD_HEADER_SIZE
+ ep
->length
>
919 epq
->buf_start
+ epq
->buf_size
) {
920 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
921 __func__
, ep
->length
);
924 ep
->ptd_offset
= ptd_offset
;
925 list_add_tail(&ep
->active
, &epq
->active
);
927 ptd_offset
= next_ptd(epq
, ep
);
928 if (ptd_offset
< 0) {
929 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__
,
930 ep
->num_req
, epq
->name
);
935 list_for_each_entry(ep
, &epq
->active
, active
) {
936 if (epq
->active
.next
== &ep
->active
)
937 ep
->ptd
.mps
|= PTD_LAST_MSK
;
938 isp1362_write_ptd(isp1362_hcd
, ep
, epq
);
943 enable_istl_transfers(isp1362_hcd
, flip
);
945 epq
->ptd_count
+= ptd_count
;
946 if (epq
->ptd_count
> epq
->stat_maxptds
)
947 epq
->stat_maxptds
= epq
->ptd_count
;
949 /* check, whether the second ISTL buffer may also be filled */
950 if (!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
951 (flip
? HCBUFSTAT_ISTL0_FULL
: HCBUFSTAT_ISTL1_FULL
))) {
959 static void finish_transfers(struct isp1362_hcd
*isp1362_hcd
, unsigned long done_map
,
960 struct isp1362_ep_queue
*epq
)
962 struct isp1362_ep
*ep
;
963 struct isp1362_ep
*tmp
;
965 if (list_empty(&epq
->active
)) {
966 DBG(1, "%s: Nothing to do for %s queue\n", __func__
, epq
->name
);
970 DBG(1, "%s: Finishing %s transfers %08lx\n", __func__
, epq
->name
, done_map
);
972 atomic_inc(&epq
->finishing
);
973 list_for_each_entry_safe(ep
, tmp
, &epq
->active
, active
) {
974 int index
= ep
->ptd_index
;
976 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__
, epq
->name
,
977 index
, ep
->ptd_offset
);
980 if (__test_and_clear_bit(index
, &done_map
)) {
981 isp1362_read_ptd(isp1362_hcd
, ep
, epq
);
982 epq
->free_ptd
= index
;
983 BUG_ON(ep
->num_ptds
== 0);
984 release_ptd_buffers(epq
, ep
);
986 DBG(1, "%s: ep %p req %d removed from active list\n", __func__
,
988 if (!list_empty(&ep
->remove_list
)) {
989 list_del_init(&ep
->remove_list
);
990 DBG(1, "%s: ep %p removed from remove list\n", __func__
, ep
);
992 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__
, epq
->name
,
994 postproc_ep(isp1362_hcd
, ep
);
1000 pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__
, done_map
,
1002 atomic_dec(&epq
->finishing
);
1005 static void finish_iso_transfers(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep_queue
*epq
)
1007 struct isp1362_ep
*ep
;
1008 struct isp1362_ep
*tmp
;
1010 if (list_empty(&epq
->active
)) {
1011 DBG(1, "%s: Nothing to do for %s queue\n", __func__
, epq
->name
);
1015 DBG(1, "%s: Finishing %s transfers\n", __func__
, epq
->name
);
1017 atomic_inc(&epq
->finishing
);
1018 list_for_each_entry_safe(ep
, tmp
, &epq
->active
, active
) {
1019 DBG(1, "%s: Checking PTD $%04x\n", __func__
, ep
->ptd_offset
);
1021 isp1362_read_ptd(isp1362_hcd
, ep
, epq
);
1022 DBG(1, "%s: Postprocessing %s ep %p\n", __func__
, epq
->name
, ep
);
1023 postproc_ep(isp1362_hcd
, ep
);
1025 WARN_ON(epq
->blk_size
!= 0);
1026 atomic_dec(&epq
->finishing
);
1029 static irqreturn_t
isp1362_irq(struct usb_hcd
*hcd
)
1032 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1036 spin_lock(&isp1362_hcd
->lock
);
1038 BUG_ON(isp1362_hcd
->irq_active
++);
1040 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
1042 irqstat
= isp1362_read_reg16(isp1362_hcd
, HCuPINT
);
1043 DBG(3, "%s: got IRQ %04x:%04x\n", __func__
, irqstat
, isp1362_hcd
->irqenb
);
1045 /* only handle interrupts that are currently enabled */
1046 irqstat
&= isp1362_hcd
->irqenb
;
1047 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, irqstat
);
1050 if (irqstat
& HCuPINT_SOF
) {
1051 isp1362_hcd
->irqenb
&= ~HCuPINT_SOF
;
1052 isp1362_hcd
->irq_stat
[ISP1362_INT_SOF
]++;
1054 svc_mask
&= ~HCuPINT_SOF
;
1055 DBG(3, "%s: SOF\n", __func__
);
1056 isp1362_hcd
->fmindex
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1057 if (!list_empty(&isp1362_hcd
->remove_list
))
1058 finish_unlinks(isp1362_hcd
);
1059 if (!list_empty(&isp1362_hcd
->async
) && !(irqstat
& HCuPINT_ATL
)) {
1060 if (list_empty(&isp1362_hcd
->atl_queue
.active
)) {
1061 start_atl_transfers(isp1362_hcd
);
1063 isp1362_enable_int(isp1362_hcd
, HCuPINT_ATL
);
1064 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
,
1065 isp1362_hcd
->atl_queue
.skip_map
);
1066 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
1071 if (irqstat
& HCuPINT_ISTL0
) {
1072 isp1362_hcd
->irq_stat
[ISP1362_INT_ISTL0
]++;
1074 svc_mask
&= ~HCuPINT_ISTL0
;
1075 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ISTL0_FULL
);
1076 DBG(1, "%s: ISTL0\n", __func__
);
1077 WARN_ON((int)!!isp1362_hcd
->istl_flip
);
1078 WARN_ON(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1079 HCBUFSTAT_ISTL0_ACTIVE
);
1080 WARN_ON(!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1081 HCBUFSTAT_ISTL0_DONE
));
1082 isp1362_hcd
->irqenb
&= ~HCuPINT_ISTL0
;
1085 if (irqstat
& HCuPINT_ISTL1
) {
1086 isp1362_hcd
->irq_stat
[ISP1362_INT_ISTL1
]++;
1088 svc_mask
&= ~HCuPINT_ISTL1
;
1089 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ISTL1_FULL
);
1090 DBG(1, "%s: ISTL1\n", __func__
);
1091 WARN_ON(!(int)isp1362_hcd
->istl_flip
);
1092 WARN_ON(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1093 HCBUFSTAT_ISTL1_ACTIVE
);
1094 WARN_ON(!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1095 HCBUFSTAT_ISTL1_DONE
));
1096 isp1362_hcd
->irqenb
&= ~HCuPINT_ISTL1
;
1099 if (irqstat
& (HCuPINT_ISTL0
| HCuPINT_ISTL1
)) {
1100 WARN_ON((irqstat
& (HCuPINT_ISTL0
| HCuPINT_ISTL1
)) ==
1101 (HCuPINT_ISTL0
| HCuPINT_ISTL1
));
1102 finish_iso_transfers(isp1362_hcd
,
1103 &isp1362_hcd
->istl_queue
[isp1362_hcd
->istl_flip
]);
1104 start_iso_transfers(isp1362_hcd
);
1105 isp1362_hcd
->istl_flip
= 1 - isp1362_hcd
->istl_flip
;
1108 if (irqstat
& HCuPINT_INTL
) {
1109 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
);
1110 u32 skip_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
);
1111 isp1362_hcd
->irq_stat
[ISP1362_INT_INTL
]++;
1113 DBG(2, "%s: INTL\n", __func__
);
1115 svc_mask
&= ~HCuPINT_INTL
;
1117 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, skip_map
| done_map
);
1118 if (~(done_map
| skip_map
) == 0)
1119 /* All PTDs are finished, disable INTL processing entirely */
1120 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
1125 DBG(3, "%s: INTL done_map %08x\n", __func__
, done_map
);
1126 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->intl_queue
);
1127 start_intl_transfers(isp1362_hcd
);
1131 if (irqstat
& HCuPINT_ATL
) {
1132 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCATLDONE
);
1133 u32 skip_map
= isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
);
1134 isp1362_hcd
->irq_stat
[ISP1362_INT_ATL
]++;
1136 DBG(2, "%s: ATL\n", __func__
);
1138 svc_mask
&= ~HCuPINT_ATL
;
1140 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, skip_map
| done_map
);
1141 if (~(done_map
| skip_map
) == 0)
1142 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
1144 DBG(3, "%s: ATL done_map %08x\n", __func__
, done_map
);
1145 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->atl_queue
);
1146 start_atl_transfers(isp1362_hcd
);
1151 if (irqstat
& HCuPINT_OPR
) {
1152 u32 intstat
= isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
);
1153 isp1362_hcd
->irq_stat
[ISP1362_INT_OPR
]++;
1155 svc_mask
&= ~HCuPINT_OPR
;
1156 DBG(2, "%s: OPR %08x:%08x\n", __func__
, intstat
, isp1362_hcd
->intenb
);
1157 intstat
&= isp1362_hcd
->intenb
;
1158 if (intstat
& OHCI_INTR_UE
) {
1159 pr_err("Unrecoverable error\n");
1160 /* FIXME: do here reset or cleanup or whatever */
1162 if (intstat
& OHCI_INTR_RHSC
) {
1163 isp1362_hcd
->rhstatus
= isp1362_read_reg32(isp1362_hcd
, HCRHSTATUS
);
1164 isp1362_hcd
->rhport
[0] = isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
);
1165 isp1362_hcd
->rhport
[1] = isp1362_read_reg32(isp1362_hcd
, HCRHPORT2
);
1167 if (intstat
& OHCI_INTR_RD
) {
1168 pr_info("%s: RESUME DETECTED\n", __func__
);
1169 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1170 usb_hcd_resume_root_hub(hcd
);
1172 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
, intstat
);
1173 irqstat
&= ~HCuPINT_OPR
;
1177 if (irqstat
& HCuPINT_SUSP
) {
1178 isp1362_hcd
->irq_stat
[ISP1362_INT_SUSP
]++;
1180 svc_mask
&= ~HCuPINT_SUSP
;
1182 pr_info("%s: SUSPEND IRQ\n", __func__
);
1185 if (irqstat
& HCuPINT_CLKRDY
) {
1186 isp1362_hcd
->irq_stat
[ISP1362_INT_CLKRDY
]++;
1188 isp1362_hcd
->irqenb
&= ~HCuPINT_CLKRDY
;
1189 svc_mask
&= ~HCuPINT_CLKRDY
;
1190 pr_info("%s: CLKRDY IRQ\n", __func__
);
1194 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__
, svc_mask
);
1196 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
1197 isp1362_hcd
->irq_active
--;
1198 spin_unlock(&isp1362_hcd
->lock
);
1200 return IRQ_RETVAL(handled
);
1203 /*-------------------------------------------------------------------------*/
1205 #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
1206 static int balance(struct isp1362_hcd
*isp1362_hcd
, u16 interval
, u16 load
)
1208 int i
, branch
= -ENOSPC
;
1210 /* search for the least loaded schedule branch of that interval
1211 * which has enough bandwidth left unreserved.
1213 for (i
= 0; i
< interval
; i
++) {
1214 if (branch
< 0 || isp1362_hcd
->load
[branch
] > isp1362_hcd
->load
[i
]) {
1217 for (j
= i
; j
< PERIODIC_SIZE
; j
+= interval
) {
1218 if ((isp1362_hcd
->load
[j
] + load
) > MAX_PERIODIC_LOAD
) {
1219 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__
,
1220 load
, j
, isp1362_hcd
->load
[j
], MAX_PERIODIC_LOAD
);
1224 if (j
< PERIODIC_SIZE
)
/* NB! ALL the code above this point runs with isp1362_hcd->lock
 * held, irqs off
 */
1236 /*-------------------------------------------------------------------------*/
1238 static int isp1362_urb_enqueue(struct usb_hcd
*hcd
,
1242 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1243 struct usb_device
*udev
= urb
->dev
;
1244 unsigned int pipe
= urb
->pipe
;
1245 int is_out
= !usb_pipein(pipe
);
1246 int type
= usb_pipetype(pipe
);
1247 int epnum
= usb_pipeendpoint(pipe
);
1248 struct usb_host_endpoint
*hep
= urb
->ep
;
1249 struct isp1362_ep
*ep
= NULL
;
1250 unsigned long flags
;
1253 DBG(3, "%s: urb %p\n", __func__
, urb
);
1255 if (type
== PIPE_ISOCHRONOUS
) {
1256 pr_err("Isochronous transfers not supported\n");
1260 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__
,
1261 usb_pipedevice(pipe
), epnum
,
1262 is_out
? "out" : "in",
1263 usb_pipecontrol(pipe
) ? "ctrl" :
1264 usb_pipeint(pipe
) ? "int" :
1265 usb_pipebulk(pipe
) ? "bulk" :
1267 urb
->transfer_buffer_length
,
1268 (urb
->transfer_flags
& URB_ZERO_PACKET
) ? "ZERO_PACKET " : "",
1269 !(urb
->transfer_flags
& URB_SHORT_NOT_OK
) ?
1272 /* avoid all allocations within spinlocks: request or endpoint */
1274 ep
= kcalloc(1, sizeof *ep
, mem_flags
);
1278 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1280 /* don't submit to a dead or disabled port */
1281 if (!((isp1362_hcd
->rhport
[0] | isp1362_hcd
->rhport
[1]) &
1282 (1 << USB_PORT_FEAT_ENABLE
)) ||
1283 !HC_IS_RUNNING(hcd
->state
)) {
1286 goto fail_not_linked
;
1289 retval
= usb_hcd_link_urb_to_ep(hcd
, urb
);
1292 goto fail_not_linked
;
1298 INIT_LIST_HEAD(&ep
->schedule
);
1299 INIT_LIST_HEAD(&ep
->active
);
1300 INIT_LIST_HEAD(&ep
->remove_list
);
1301 ep
->udev
= usb_get_dev(udev
);
1304 ep
->maxpacket
= usb_maxpacket(udev
, urb
->pipe
, is_out
);
1305 ep
->ptd_offset
= -EINVAL
;
1306 ep
->ptd_index
= -EINVAL
;
1307 usb_settoggle(udev
, epnum
, is_out
, 0);
1309 if (type
== PIPE_CONTROL
)
1310 ep
->nextpid
= USB_PID_SETUP
;
1312 ep
->nextpid
= USB_PID_OUT
;
1314 ep
->nextpid
= USB_PID_IN
;
1317 case PIPE_ISOCHRONOUS
:
1318 case PIPE_INTERRUPT
:
1319 if (urb
->interval
> PERIODIC_SIZE
)
1320 urb
->interval
= PERIODIC_SIZE
;
1321 ep
->interval
= urb
->interval
;
1322 ep
->branch
= PERIODIC_SIZE
;
1323 ep
->load
= usb_calc_bus_time(udev
->speed
, !is_out
,
1324 (type
== PIPE_ISOCHRONOUS
),
1325 usb_maxpacket(udev
, pipe
, is_out
)) / 1000;
1330 ep
->num_req
= isp1362_hcd
->req_serial
++;
1332 /* maybe put endpoint into schedule */
1336 if (list_empty(&ep
->schedule
)) {
1337 DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1338 __func__
, ep
, ep
->num_req
);
1339 list_add_tail(&ep
->schedule
, &isp1362_hcd
->async
);
1342 case PIPE_ISOCHRONOUS
:
1343 case PIPE_INTERRUPT
:
1344 urb
->interval
= ep
->interval
;
1346 /* urb submitted for already existing EP */
1347 if (ep
->branch
< PERIODIC_SIZE
)
1350 retval
= balance(isp1362_hcd
, ep
->interval
, ep
->load
);
1352 pr_err("%s: balance returned %d\n", __func__
, retval
);
1355 ep
->branch
= retval
;
1357 isp1362_hcd
->fmindex
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1358 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1359 __func__
, isp1362_hcd
->fmindex
, ep
->branch
,
1360 ((isp1362_hcd
->fmindex
+ PERIODIC_SIZE
- 1) &
1361 ~(PERIODIC_SIZE
- 1)) + ep
->branch
,
1362 (isp1362_hcd
->fmindex
& (PERIODIC_SIZE
- 1)) + ep
->branch
);
1364 if (list_empty(&ep
->schedule
)) {
1365 if (type
== PIPE_ISOCHRONOUS
) {
1366 u16 frame
= isp1362_hcd
->fmindex
;
1368 frame
+= max_t(u16
, 8, ep
->interval
);
1369 frame
&= ~(ep
->interval
- 1);
1370 frame
|= ep
->branch
;
1371 if (frame_before(frame
, isp1362_hcd
->fmindex
))
1372 frame
+= ep
->interval
;
1373 urb
->start_frame
= frame
;
1375 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__
, ep
);
1376 list_add_tail(&ep
->schedule
, &isp1362_hcd
->isoc
);
1378 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__
, ep
);
1379 list_add_tail(&ep
->schedule
, &isp1362_hcd
->periodic
);
1382 DBG(1, "%s: ep %p already scheduled\n", __func__
, ep
);
1384 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__
,
1385 ep
->load
/ ep
->interval
, isp1362_hcd
->load
[ep
->branch
],
1386 isp1362_hcd
->load
[ep
->branch
] + ep
->load
);
1387 isp1362_hcd
->load
[ep
->branch
] += ep
->load
;
1391 ALIGNSTAT(isp1362_hcd
, urb
->transfer_buffer
);
1396 start_atl_transfers(isp1362_hcd
);
1398 case PIPE_INTERRUPT
:
1399 start_intl_transfers(isp1362_hcd
);
1401 case PIPE_ISOCHRONOUS
:
1402 start_iso_transfers(isp1362_hcd
);
1409 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
1413 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1415 DBG(0, "%s: urb %p failed with %d\n", __func__
, urb
, retval
);
1419 static int isp1362_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
)
1421 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1422 struct usb_host_endpoint
*hep
;
1423 unsigned long flags
;
1424 struct isp1362_ep
*ep
;
1427 DBG(3, "%s: urb %p\n", __func__
, urb
);
1429 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1430 retval
= usb_hcd_check_unlink_urb(hcd
, urb
, status
);
1437 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1443 /* In front of queue? */
1444 if (ep
->hep
->urb_list
.next
== &urb
->urb_list
) {
1445 if (!list_empty(&ep
->active
)) {
1446 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__
,
1447 urb
, ep
, ep
->num_req
, ep
->ptd_index
, ep
->ptd_offset
);
1448 /* disable processing and queue PTD for removal */
1449 remove_ptd(isp1362_hcd
, ep
);
1454 DBG(1, "%s: Finishing ep %p req %d\n", __func__
, ep
,
1456 finish_request(isp1362_hcd
, ep
, urb
, status
);
1458 DBG(1, "%s: urb %p active; wait4irq\n", __func__
, urb
);
1460 pr_warning("%s: No EP in URB %p\n", __func__
, urb
);
1464 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1466 DBG(3, "%s: exit\n", __func__
);
1471 static void isp1362_endpoint_disable(struct usb_hcd
*hcd
, struct usb_host_endpoint
*hep
)
1473 struct isp1362_ep
*ep
= hep
->hcpriv
;
1474 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1475 unsigned long flags
;
1477 DBG(1, "%s: ep %p\n", __func__
, ep
);
1480 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1481 if (!list_empty(&hep
->urb_list
)) {
1482 if (!list_empty(&ep
->active
) && list_empty(&ep
->remove_list
)) {
1483 DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__
,
1484 ep
, ep
->num_req
, ep
->ptd_index
, ep
->ptd_offset
);
1485 remove_ptd(isp1362_hcd
, ep
);
1486 pr_info("%s: Waiting for Interrupt to clean up\n", __func__
);
1489 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1490 /* Wait for interrupt to clear out active list */
1491 while (!list_empty(&ep
->active
))
1494 DBG(1, "%s: Freeing EP %p\n", __func__
, ep
);
1496 usb_put_dev(ep
->udev
);
1501 static int isp1362_get_frame(struct usb_hcd
*hcd
)
1503 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1505 unsigned long flags
;
1507 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1508 fmnum
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1509 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1514 /*-------------------------------------------------------------------------*/
1516 /* Adapted from ohci-hub.c */
1517 static int isp1362_hub_status_data(struct usb_hcd
*hcd
, char *buf
)
1519 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1520 int ports
, i
, changed
= 0;
1521 unsigned long flags
;
1523 if (!HC_IS_RUNNING(hcd
->state
))
1526 /* Report no status change now, if we are scheduled to be
1528 if (timer_pending(&hcd
->rh_timer
))
1531 ports
= isp1362_hcd
->rhdesca
& RH_A_NDP
;
1534 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1536 if (isp1362_hcd
->rhstatus
& (RH_HS_LPSC
| RH_HS_OCIC
))
1537 buf
[0] = changed
= 1;
1541 for (i
= 0; i
< ports
; i
++) {
1542 u32 status
= isp1362_hcd
->rhport
[i
];
1544 if (status
& (RH_PS_CSC
| RH_PS_PESC
| RH_PS_PSSC
|
1545 RH_PS_OCIC
| RH_PS_PRSC
)) {
1547 buf
[0] |= 1 << (i
+ 1);
1551 if (!(status
& RH_PS_CCS
))
1554 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1558 static void isp1362_hub_descriptor(struct isp1362_hcd
*isp1362_hcd
,
1559 struct usb_hub_descriptor
*desc
)
1561 u32 reg
= isp1362_hcd
->rhdesca
;
1563 DBG(3, "%s: enter\n", __func__
);
1565 desc
->bDescriptorType
= 0x29;
1566 desc
->bDescLength
= 9;
1567 desc
->bHubContrCurrent
= 0;
1568 desc
->bNbrPorts
= reg
& 0x3;
1569 /* Power switching, device type, overcurrent. */
1570 desc
->wHubCharacteristics
= cpu_to_le16((reg
>> 8) & 0x1f);
1571 DBG(0, "%s: hubcharacteristics = %02x\n", __func__
, cpu_to_le16((reg
>> 8) & 0x1f));
1572 desc
->bPwrOn2PwrGood
= (reg
>> 24) & 0xff;
1573 /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
1574 desc
->bitmap
[0] = desc
->bNbrPorts
== 1 ? 1 << 1 : 3 << 1;
1575 desc
->bitmap
[1] = ~0;
1577 DBG(3, "%s: exit\n", __func__
);
1580 /* Adapted from ohci-hub.c */
1581 static int isp1362_hub_control(struct usb_hcd
*hcd
, u16 typeReq
, u16 wValue
,
1582 u16 wIndex
, char *buf
, u16 wLength
)
1584 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1586 unsigned long flags
;
1588 int ports
= isp1362_hcd
->rhdesca
& RH_A_NDP
;
1592 case ClearHubFeature
:
1593 DBG(0, "ClearHubFeature: ");
1595 case C_HUB_OVER_CURRENT
:
1596 _DBG(0, "C_HUB_OVER_CURRENT\n");
1597 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1598 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_OCIC
);
1599 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1600 case C_HUB_LOCAL_POWER
:
1601 _DBG(0, "C_HUB_LOCAL_POWER\n");
1608 DBG(0, "SetHubFeature: ");
1610 case C_HUB_OVER_CURRENT
:
1611 case C_HUB_LOCAL_POWER
:
1612 _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1618 case GetHubDescriptor
:
1619 DBG(0, "GetHubDescriptor\n");
1620 isp1362_hub_descriptor(isp1362_hcd
, (struct usb_hub_descriptor
*)buf
);
1623 DBG(0, "GetHubStatus\n");
1624 put_unaligned(cpu_to_le32(0), (__le32
*) buf
);
1628 DBG(0, "GetPortStatus\n");
1630 if (!wIndex
|| wIndex
> ports
)
1632 tmp
= isp1362_hcd
->rhport
[--wIndex
];
1633 put_unaligned(cpu_to_le32(tmp
), (__le32
*) buf
);
1635 case ClearPortFeature
:
1636 DBG(0, "ClearPortFeature: ");
1637 if (!wIndex
|| wIndex
> ports
)
1642 case USB_PORT_FEAT_ENABLE
:
1643 _DBG(0, "USB_PORT_FEAT_ENABLE\n");
1646 case USB_PORT_FEAT_C_ENABLE
:
1647 _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1650 case USB_PORT_FEAT_SUSPEND
:
1651 _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1654 case USB_PORT_FEAT_C_SUSPEND
:
1655 _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1658 case USB_PORT_FEAT_POWER
:
1659 _DBG(0, "USB_PORT_FEAT_POWER\n");
1663 case USB_PORT_FEAT_C_CONNECTION
:
1664 _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1667 case USB_PORT_FEAT_C_OVER_CURRENT
:
1668 _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1671 case USB_PORT_FEAT_C_RESET
:
1672 _DBG(0, "USB_PORT_FEAT_C_RESET\n");
1679 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1680 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, tmp
);
1681 isp1362_hcd
->rhport
[wIndex
] =
1682 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1683 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1685 case SetPortFeature
:
1686 DBG(0, "SetPortFeature: ");
1687 if (!wIndex
|| wIndex
> ports
)
1691 case USB_PORT_FEAT_SUSPEND
:
1692 _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1693 #ifdef CONFIG_USB_OTG
1694 if (ohci
->hcd
.self
.otg_port
== (wIndex
+ 1) &&
1695 ohci
->hcd
.self
.b_hnp_enable
) {
1700 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1701 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, RH_PS_PSS
);
1702 isp1362_hcd
->rhport
[wIndex
] =
1703 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1704 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1706 case USB_PORT_FEAT_POWER
:
1707 _DBG(0, "USB_PORT_FEAT_POWER\n");
1708 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1709 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, RH_PS_PPS
);
1710 isp1362_hcd
->rhport
[wIndex
] =
1711 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1712 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1714 case USB_PORT_FEAT_RESET
:
1715 _DBG(0, "USB_PORT_FEAT_RESET\n");
1716 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1718 t1
= jiffies
+ msecs_to_jiffies(USB_RESET_WIDTH
);
1719 while (time_before(jiffies
, t1
)) {
1720 /* spin until any current reset finishes */
1722 tmp
= isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1723 if (!(tmp
& RH_PS_PRS
))
1727 if (!(tmp
& RH_PS_CCS
))
1729 /* Reset lasts 10ms (claims datasheet) */
1730 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, (RH_PS_PRS
));
1732 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1734 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1737 isp1362_hcd
->rhport
[wIndex
] = isp1362_read_reg32(isp1362_hcd
,
1738 HCRHPORT1
+ wIndex
);
1739 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1748 /* "protocol stall" on error */
1749 _DBG(0, "PROTOCOL STALL\n");
1757 static int isp1362_bus_suspend(struct usb_hcd
*hcd
)
1760 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1761 unsigned long flags
;
1763 if (time_before(jiffies
, isp1362_hcd
->next_statechange
))
1766 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1768 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1769 switch (isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) {
1770 case OHCI_USB_RESUME
:
1771 DBG(0, "%s: resume/suspend?\n", __func__
);
1772 isp1362_hcd
->hc_control
&= ~OHCI_CTRL_HCFS
;
1773 isp1362_hcd
->hc_control
|= OHCI_USB_RESET
;
1774 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1776 case OHCI_USB_RESET
:
1778 pr_warning("%s: needs reinit!\n", __func__
);
1780 case OHCI_USB_SUSPEND
:
1781 pr_warning("%s: already suspended?\n", __func__
);
1784 DBG(0, "%s: suspend root hub\n", __func__
);
1786 /* First stop any processing */
1787 hcd
->state
= HC_STATE_QUIESCING
;
1788 if (!list_empty(&isp1362_hcd
->atl_queue
.active
) ||
1789 !list_empty(&isp1362_hcd
->intl_queue
.active
) ||
1790 !list_empty(&isp1362_hcd
->istl_queue
[0] .active
) ||
1791 !list_empty(&isp1362_hcd
->istl_queue
[1] .active
)) {
1794 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, ~0);
1795 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, ~0);
1796 isp1362_write_reg16(isp1362_hcd
, HCBUFSTAT
, 0);
1797 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
1798 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
, OHCI_INTR_SF
);
1800 DBG(0, "%s: stopping schedules ...\n", __func__
);
1805 if (isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
) & OHCI_INTR_SF
)
1809 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ATL
) {
1810 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCATLDONE
);
1811 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->atl_queue
);
1813 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_INTL
) {
1814 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
);
1815 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->intl_queue
);
1817 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ISTL0
)
1818 finish_iso_transfers(isp1362_hcd
, &isp1362_hcd
->istl_queue
[0]);
1819 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ISTL1
)
1820 finish_iso_transfers(isp1362_hcd
, &isp1362_hcd
->istl_queue
[1]);
1822 DBG(0, "%s: HCINTSTAT: %08x\n", __func__
,
1823 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
1824 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
,
1825 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
1828 isp1362_hcd
->hc_control
= OHCI_USB_SUSPEND
;
1829 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1830 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1831 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1834 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1835 if ((isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) != OHCI_USB_SUSPEND
) {
1836 pr_err("%s: controller won't suspend %08x\n", __func__
,
1837 isp1362_hcd
->hc_control
);
1842 /* no resumes until devices finish suspending */
1843 isp1362_hcd
->next_statechange
= jiffies
+ msecs_to_jiffies(5);
1847 hcd
->state
= HC_STATE_SUSPENDED
;
1848 DBG(0, "%s: HCD suspended: %08x\n", __func__
,
1849 isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
1851 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1855 static int isp1362_bus_resume(struct usb_hcd
*hcd
)
1857 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1859 unsigned long flags
;
1860 int status
= -EINPROGRESS
;
1862 if (time_before(jiffies
, isp1362_hcd
->next_statechange
))
1865 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1866 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1867 pr_info("%s: HCCONTROL: %08x\n", __func__
, isp1362_hcd
->hc_control
);
1868 if (hcd
->state
== HC_STATE_RESUMING
) {
1869 pr_warning("%s: duplicate resume\n", __func__
);
1872 switch (isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) {
1873 case OHCI_USB_SUSPEND
:
1874 DBG(0, "%s: resume root hub\n", __func__
);
1875 isp1362_hcd
->hc_control
&= ~OHCI_CTRL_HCFS
;
1876 isp1362_hcd
->hc_control
|= OHCI_USB_RESUME
;
1877 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1879 case OHCI_USB_RESUME
:
1880 /* HCFS changes sometime after INTR_RD */
1881 DBG(0, "%s: remote wakeup\n", __func__
);
1884 DBG(0, "%s: odd resume\n", __func__
);
1886 hcd
->self
.root_hub
->dev
.power
.power_state
= PMSG_ON
;
1888 default: /* RESET, we lost power */
1889 DBG(0, "%s: root hub hardware reset\n", __func__
);
1892 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1893 if (status
== -EBUSY
) {
1894 DBG(0, "%s: Restarting HC\n", __func__
);
1895 isp1362_hc_stop(hcd
);
1896 return isp1362_hc_start(hcd
);
1898 if (status
!= -EINPROGRESS
)
1900 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1901 port
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
) & RH_A_NDP
;
1903 u32 stat
= isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ port
);
1905 /* force global, not selective, resume */
1906 if (!(stat
& RH_PS_PSS
)) {
1907 DBG(0, "%s: Not Resuming RH port %d\n", __func__
, port
);
1910 DBG(0, "%s: Resuming RH port %d\n", __func__
, port
);
1911 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ port
, RH_PS_POCI
);
1913 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1915 /* Some controllers (lucent) need extra-long delays */
1916 hcd
->state
= HC_STATE_RESUMING
;
1917 mdelay(20 /* usb 11.5.1.10 */ + 15);
1919 isp1362_hcd
->hc_control
= OHCI_USB_OPER
;
1920 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1921 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1922 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1923 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1927 /* keep it alive for ~5x suspend + resume costs */
1928 isp1362_hcd
->next_statechange
= jiffies
+ msecs_to_jiffies(250);
1930 hcd
->self
.root_hub
->dev
.power
.power_state
= PMSG_ON
;
1931 hcd
->state
= HC_STATE_RUNNING
;
1935 #define isp1362_bus_suspend NULL
1936 #define isp1362_bus_resume NULL
1939 /*-------------------------------------------------------------------------*/
1941 #ifdef STUB_DEBUG_FILE
/* no-op stub used when the proc debug file is compiled out */
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
/* no-op stub used when the proc debug file is compiled out */
static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
1952 #include <linux/proc_fs.h>
1953 #include <linux/seq_file.h>
1955 static void dump_irq(struct seq_file
*s
, char *label
, u16 mask
)
1957 seq_printf(s
, "%-15s %04x%s%s%s%s%s%s\n", label
, mask
,
1958 mask
& HCuPINT_CLKRDY
? " clkrdy" : "",
1959 mask
& HCuPINT_SUSP
? " susp" : "",
1960 mask
& HCuPINT_OPR
? " opr" : "",
1961 mask
& HCuPINT_EOT
? " eot" : "",
1962 mask
& HCuPINT_ATL
? " atl" : "",
1963 mask
& HCuPINT_SOF
? " sof" : "");
1966 static void dump_int(struct seq_file
*s
, char *label
, u32 mask
)
1968 seq_printf(s
, "%-15s %08x%s%s%s%s%s%s%s\n", label
, mask
,
1969 mask
& OHCI_INTR_MIE
? " MIE" : "",
1970 mask
& OHCI_INTR_RHSC
? " rhsc" : "",
1971 mask
& OHCI_INTR_FNO
? " fno" : "",
1972 mask
& OHCI_INTR_UE
? " ue" : "",
1973 mask
& OHCI_INTR_RD
? " rd" : "",
1974 mask
& OHCI_INTR_SF
? " sof" : "",
1975 mask
& OHCI_INTR_SO
? " so" : "");
1978 static void dump_ctrl(struct seq_file
*s
, char *label
, u32 mask
)
1980 seq_printf(s
, "%-15s %08x%s%s%s\n", label
, mask
,
1981 mask
& OHCI_CTRL_RWC
? " rwc" : "",
1982 mask
& OHCI_CTRL_RWE
? " rwe" : "",
1985 switch (mask
& OHCI_CTRL_HCFS
) {
1989 case OHCI_USB_RESET
:
1992 case OHCI_USB_RESUME
:
1995 case OHCI_USB_SUSPEND
:
2005 static void dump_regs(struct seq_file
*s
, struct isp1362_hcd
*isp1362_hcd
)
2007 seq_printf(s
, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION
),
2008 isp1362_read_reg32(isp1362_hcd
, HCREVISION
));
2009 seq_printf(s
, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL
),
2010 isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
2011 seq_printf(s
, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT
),
2012 isp1362_read_reg32(isp1362_hcd
, HCCMDSTAT
));
2013 seq_printf(s
, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT
),
2014 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
2015 seq_printf(s
, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB
),
2016 isp1362_read_reg32(isp1362_hcd
, HCINTENB
));
2017 seq_printf(s
, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL
),
2018 isp1362_read_reg32(isp1362_hcd
, HCFMINTVL
));
2019 seq_printf(s
, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM
),
2020 isp1362_read_reg32(isp1362_hcd
, HCFMREM
));
2021 seq_printf(s
, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM
),
2022 isp1362_read_reg32(isp1362_hcd
, HCFMNUM
));
2023 seq_printf(s
, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH
),
2024 isp1362_read_reg32(isp1362_hcd
, HCLSTHRESH
));
2025 seq_printf(s
, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA
),
2026 isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
));
2027 seq_printf(s
, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB
),
2028 isp1362_read_reg32(isp1362_hcd
, HCRHDESCB
));
2029 seq_printf(s
, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS
),
2030 isp1362_read_reg32(isp1362_hcd
, HCRHSTATUS
));
2031 seq_printf(s
, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1
),
2032 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
));
2033 seq_printf(s
, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2
),
2034 isp1362_read_reg32(isp1362_hcd
, HCRHPORT2
));
2035 seq_printf(s
, "\n");
2036 seq_printf(s
, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG
),
2037 isp1362_read_reg16(isp1362_hcd
, HCHWCFG
));
2038 seq_printf(s
, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG
),
2039 isp1362_read_reg16(isp1362_hcd
, HCDMACFG
));
2040 seq_printf(s
, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR
),
2041 isp1362_read_reg16(isp1362_hcd
, HCXFERCTR
));
2042 seq_printf(s
, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT
),
2043 isp1362_read_reg16(isp1362_hcd
, HCuPINT
));
2044 seq_printf(s
, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB
),
2045 isp1362_read_reg16(isp1362_hcd
, HCuPINTENB
));
2046 seq_printf(s
, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID
),
2047 isp1362_read_reg16(isp1362_hcd
, HCCHIPID
));
2048 seq_printf(s
, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH
),
2049 isp1362_read_reg16(isp1362_hcd
, HCSCRATCH
));
2050 seq_printf(s
, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT
),
2051 isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
));
2052 seq_printf(s
, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR
),
2053 isp1362_read_reg32(isp1362_hcd
, HCDIRADDR
));
2055 seq_printf(s
, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA
),
2056 isp1362_read_reg16(isp1362_hcd
, HCDIRDATA
));
2058 seq_printf(s
, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ
),
2059 isp1362_read_reg16(isp1362_hcd
, HCISTLBUFSZ
));
2060 seq_printf(s
, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE
),
2061 isp1362_read_reg16(isp1362_hcd
, HCISTLRATE
));
2062 seq_printf(s
, "\n");
2063 seq_printf(s
, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ
),
2064 isp1362_read_reg16(isp1362_hcd
, HCINTLBUFSZ
));
2065 seq_printf(s
, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ
),
2066 isp1362_read_reg16(isp1362_hcd
, HCINTLBLKSZ
));
2067 seq_printf(s
, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE
),
2068 isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
));
2069 seq_printf(s
, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP
),
2070 isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
));
2071 seq_printf(s
, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST
),
2072 isp1362_read_reg32(isp1362_hcd
, HCINTLLAST
));
2073 seq_printf(s
, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR
),
2074 isp1362_read_reg16(isp1362_hcd
, HCINTLCURR
));
2075 seq_printf(s
, "\n");
2076 seq_printf(s
, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ
),
2077 isp1362_read_reg16(isp1362_hcd
, HCATLBUFSZ
));
2078 seq_printf(s
, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ
),
2079 isp1362_read_reg16(isp1362_hcd
, HCATLBLKSZ
));
2081 seq_printf(s
, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE
),
2082 isp1362_read_reg32(isp1362_hcd
, HCATLDONE
));
2084 seq_printf(s
, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP
),
2085 isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
));
2086 seq_printf(s
, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST
),
2087 isp1362_read_reg32(isp1362_hcd
, HCATLLAST
));
2088 seq_printf(s
, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR
),
2089 isp1362_read_reg16(isp1362_hcd
, HCATLCURR
));
2090 seq_printf(s
, "\n");
2091 seq_printf(s
, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC
),
2092 isp1362_read_reg16(isp1362_hcd
, HCATLDTC
));
2093 seq_printf(s
, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO
),
2094 isp1362_read_reg16(isp1362_hcd
, HCATLDTCTO
));
/*
 * seq_file "show" callback for /proc/driver/isp1362: dump driver statistics,
 * chip registers and the async/periodic/iso schedules of one controller.
 *
 * NOTE(review): reconstructed from a fragmented extraction; statements marked
 * below were dropped by the extractor and restored from the upstream driver —
 * verify against the original file.
 */
static int proc_isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;	/* set via PDE(inode)->data at open time */
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);

	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-IRQ-source counters, only shown when non-zero */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	/* async (control/bulk) endpoints plus their queued URBs */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		/* statement expression maps ep->nextpid to a token-PID label */
		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   char *s;
				   switch (ep->nextpid) {
				   /* NOTE(review): case labels restored from upstream — verify */
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   };
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, " urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* interrupt endpoints with their branch/load bookkeeping */
	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
2201 static int proc_isp1362_open(struct inode
*inode
, struct file
*file
)
2203 return single_open(file
, proc_isp1362_show
, PDE(inode
)->data
);
/* seq_file plumbing for the /proc/driver/isp1362 entry. */
static const struct file_operations proc_ops = {
	.open = proc_isp1362_open,
	/* NOTE(review): .read = seq_read was dropped by extraction; standard
	 * for single_open()-based entries — verify against the original. */
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* expect just one isp1362_hcd per system */
/* Path is relative to /proc, i.e. the entry appears as /proc/driver/isp1362. */
static const char proc_filename[] = "driver/isp1362";
/*
 * Create /proc/driver/isp1362 and attach this controller's state to it.
 * On failure only a warning is printed; debugging support is optional.
 *
 * NOTE(review): the entry is visible before ->proc_fops/->data are assigned,
 * which is racy against a concurrent open; later kernels close this window
 * with proc_create_data() — confirm which API this tree provides.
 */
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	struct proc_dir_entry *pde;

	pde = create_proc_entry(proc_filename, 0, NULL);
	if (pde == NULL) {
		pr_warning("%s: Failed to create debug file '%s'\n", __func__,
			   proc_filename);
		return;
	}
	pde->proc_fops = &proc_ops;
	pde->data = isp1362_hcd;
	isp1362_hcd->pde = pde;	/* remembered so remove_debug_file() knows it exists */
}
2231 static void remove_debug_file(struct isp1362_hcd
*isp1362_hcd
)
2233 if (isp1362_hcd
->pde
)
2234 remove_proc_entry(proc_filename
, 0);
2239 /*-------------------------------------------------------------------------*/
/*
 * Software-reset the ISP1362: write the magic to HCSWRES, request an OHCI
 * host-controller reset via HCCMDSTAT, and poll until the HCR bit clears.
 * Runs with the controller lock held and IRQs off.
 *
 * NOTE(review): the polling loop (counter init / mdelay / break) was dropped
 * by extraction and restored from the upstream driver — verify.
 */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	int tmp = 20;	/* up to ~20ms of polling before declaring timeout */
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
	while (--tmp) {
		mdelay(1);
		/* HCR self-clears when the chip finishes resetting */
		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
			break;
	}
	if (!tmp)
		pr_err("Software reset timeout\n");
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2260 static int isp1362_mem_config(struct usb_hcd
*hcd
)
2262 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2263 unsigned long flags
;
2265 u16 istl_size
= ISP1362_ISTL_BUFSIZE
;
2266 u16 intl_blksize
= ISP1362_INTL_BLKSIZE
+ PTD_HEADER_SIZE
;
2267 u16 intl_size
= ISP1362_INTL_BUFFERS
* intl_blksize
;
2268 u16 atl_blksize
= ISP1362_ATL_BLKSIZE
+ PTD_HEADER_SIZE
;
2269 u16 atl_buffers
= (ISP1362_BUF_SIZE
- (istl_size
+ intl_size
)) / atl_blksize
;
2273 WARN_ON(istl_size
& 3);
2274 WARN_ON(atl_blksize
& 3);
2275 WARN_ON(intl_blksize
& 3);
2276 WARN_ON(atl_blksize
< PTD_HEADER_SIZE
);
2277 WARN_ON(intl_blksize
< PTD_HEADER_SIZE
);
2279 BUG_ON((unsigned)ISP1362_INTL_BUFFERS
> 32);
2280 if (atl_buffers
> 32)
2282 atl_size
= atl_buffers
* atl_blksize
;
2283 total
= atl_size
+ intl_size
+ istl_size
;
2284 dev_info(hcd
->self
.controller
, "ISP1362 Memory usage:\n");
2285 dev_info(hcd
->self
.controller
, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2286 istl_size
/ 2, istl_size
, 0, istl_size
/ 2);
2287 dev_info(hcd
->self
.controller
, " INTL: %4d * (%3lu+8): %4d @ $%04x\n",
2288 ISP1362_INTL_BUFFERS
, intl_blksize
- PTD_HEADER_SIZE
,
2289 intl_size
, istl_size
);
2290 dev_info(hcd
->self
.controller
, " ATL : %4d * (%3lu+8): %4d @ $%04x\n",
2291 atl_buffers
, atl_blksize
- PTD_HEADER_SIZE
,
2292 atl_size
, istl_size
+ intl_size
);
2293 dev_info(hcd
->self
.controller
, " USED/FREE: %4d %4d\n", total
,
2294 ISP1362_BUF_SIZE
- total
);
2296 if (total
> ISP1362_BUF_SIZE
) {
2297 dev_err(hcd
->self
.controller
, "%s: Memory requested: %d, available %d\n",
2298 __func__
, total
, ISP1362_BUF_SIZE
);
2302 total
= istl_size
+ intl_size
+ atl_size
;
2303 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2305 for (i
= 0; i
< 2; i
++) {
2306 isp1362_hcd
->istl_queue
[i
].buf_start
= i
* istl_size
/ 2,
2307 isp1362_hcd
->istl_queue
[i
].buf_size
= istl_size
/ 2;
2308 isp1362_hcd
->istl_queue
[i
].blk_size
= 4;
2309 INIT_LIST_HEAD(&isp1362_hcd
->istl_queue
[i
].active
);
2310 snprintf(isp1362_hcd
->istl_queue
[i
].name
,
2311 sizeof(isp1362_hcd
->istl_queue
[i
].name
), "ISTL%d", i
);
2312 DBG(3, "%s: %5s buf $%04x %d\n", __func__
,
2313 isp1362_hcd
->istl_queue
[i
].name
,
2314 isp1362_hcd
->istl_queue
[i
].buf_start
,
2315 isp1362_hcd
->istl_queue
[i
].buf_size
);
2317 isp1362_write_reg16(isp1362_hcd
, HCISTLBUFSZ
, istl_size
/ 2);
2319 isp1362_hcd
->intl_queue
.buf_start
= istl_size
;
2320 isp1362_hcd
->intl_queue
.buf_size
= intl_size
;
2321 isp1362_hcd
->intl_queue
.buf_count
= ISP1362_INTL_BUFFERS
;
2322 isp1362_hcd
->intl_queue
.blk_size
= intl_blksize
;
2323 isp1362_hcd
->intl_queue
.buf_avail
= isp1362_hcd
->intl_queue
.buf_count
;
2324 isp1362_hcd
->intl_queue
.skip_map
= ~0;
2325 INIT_LIST_HEAD(&isp1362_hcd
->intl_queue
.active
);
2327 isp1362_write_reg16(isp1362_hcd
, HCINTLBUFSZ
,
2328 isp1362_hcd
->intl_queue
.buf_size
);
2329 isp1362_write_reg16(isp1362_hcd
, HCINTLBLKSZ
,
2330 isp1362_hcd
->intl_queue
.blk_size
- PTD_HEADER_SIZE
);
2331 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, ~0);
2332 isp1362_write_reg32(isp1362_hcd
, HCINTLLAST
,
2333 1 << (ISP1362_INTL_BUFFERS
- 1));
2335 isp1362_hcd
->atl_queue
.buf_start
= istl_size
+ intl_size
;
2336 isp1362_hcd
->atl_queue
.buf_size
= atl_size
;
2337 isp1362_hcd
->atl_queue
.buf_count
= atl_buffers
;
2338 isp1362_hcd
->atl_queue
.blk_size
= atl_blksize
;
2339 isp1362_hcd
->atl_queue
.buf_avail
= isp1362_hcd
->atl_queue
.buf_count
;
2340 isp1362_hcd
->atl_queue
.skip_map
= ~0;
2341 INIT_LIST_HEAD(&isp1362_hcd
->atl_queue
.active
);
2343 isp1362_write_reg16(isp1362_hcd
, HCATLBUFSZ
,
2344 isp1362_hcd
->atl_queue
.buf_size
);
2345 isp1362_write_reg16(isp1362_hcd
, HCATLBLKSZ
,
2346 isp1362_hcd
->atl_queue
.blk_size
- PTD_HEADER_SIZE
);
2347 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, ~0);
2348 isp1362_write_reg32(isp1362_hcd
, HCATLLAST
,
2349 1 << (atl_buffers
- 1));
2351 snprintf(isp1362_hcd
->atl_queue
.name
,
2352 sizeof(isp1362_hcd
->atl_queue
.name
), "ATL");
2353 snprintf(isp1362_hcd
->intl_queue
.name
,
2354 sizeof(isp1362_hcd
->intl_queue
.name
), "INTL");
2355 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__
,
2356 isp1362_hcd
->intl_queue
.name
,
2357 isp1362_hcd
->intl_queue
.buf_start
,
2358 ISP1362_INTL_BUFFERS
, isp1362_hcd
->intl_queue
.blk_size
,
2359 isp1362_hcd
->intl_queue
.buf_size
);
2360 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__
,
2361 isp1362_hcd
->atl_queue
.name
,
2362 isp1362_hcd
->atl_queue
.buf_start
,
2363 atl_buffers
, isp1362_hcd
->atl_queue
.blk_size
,
2364 isp1362_hcd
->atl_queue
.buf_size
);
2366 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
/*
 * hc_driver ->reset hook: hard-reset via the board's reset/clock callbacks
 * when available, otherwise software-reset, then wait up to 100ms for the
 * chip's clock-ready flag (HCuPINT_CLKRDY).
 *
 * Returns 0 on success, -ENODEV if the clock never became ready.
 *
 * NOTE(review): reconstructed from a fragmented extraction; the msleep()
 * calls and the else-branch were restored from the upstream driver — verify.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;	/* ms to wait for CLKRDY */
	unsigned long flags;
	int clkrdy = 0;

	pr_info("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* board-level reset sequence: assert, start clock, deassert */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the CLKRDY interrupt (write-1-to-clear) */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
/*
 * hc_driver ->stop hook: mask all chip interrupts, power down the root-hub
 * ports, and put the chip back into reset with its clock stopped.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_info("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* disable all microprocessor-interface interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);	/* global switching, not per-port */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2443 #ifdef CHIP_BUFFER_TEST
/*
 * Optional power-on memory test (CHIP_BUFFER_TEST): exercise the chip's
 * buffer RAM through the indirect read/write interface with varying byte
 * offsets and lengths, comparing what is read back against a reference
 * pattern. Returns 0 if all checks pass, -ENODEV on any mismatch.
 *
 * NOTE(review): large parts of this function (branch bodies, loop tails,
 * the kfree/return epilogue, and the meaning of test_size) were dropped by
 * extraction and reconstructed from the upstream driver — verify carefully.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	/* one half holds the reference pattern, the other receives readback */
	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		/* fill reference with ~index, scratch with index */
		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (0..7 bytes) at each of 4 byte alignments */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					       __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* one full-buffer round trip */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header-sized transfers at 256 different word offsets */
		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;	/* NOTE(review): payload length — confirm */

			yield();

			/* clear the chip buffer and verify it reads back clear */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			/* write header + payload at the offset, then read back */
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry once: some failures are transient read glitches */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					       __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
					   __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
/*
 * hc_driver ->start hook: verify the chip ID, program the hardware
 * configuration from platform data, partition buffer memory, set up the
 * root-hub descriptors and frame timing, enable interrupts and switch the
 * controller to the OHCI operational state.
 *
 * Returns 0 on success, -ENODEV on bad chip ID / failed chip test, or the
 * error from isp1362_mem_config().
 *
 * NOTE(review): reconstructed from a fragmented extraction; error-return
 * branches and the potpg if/else were restored from upstream — verify.
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_info("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* build HCHWCFG from board-specific wiring options */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);	/* DMA off by default */
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* root-hub descriptor A: power switching mode and power-on delay */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;	/* 50ms default */

	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;	/* per-port power control */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2640 /*-------------------------------------------------------------------------*/
/* hc_driver operations table registered with the USB core via usb_create_hcd(). */
static struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	/* NOTE(review): .irq handler entry was dropped by extraction;
	 * restored as the driver's interrupt routine — verify the name. */
	.irq = isp1362_irq,
	.flags = HCD_USB11 | HCD_MEMORY,	/* full/low speed, register-mapped */

	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};
2666 /*-------------------------------------------------------------------------*/
/* Size in bytes of a struct resource; ->end is inclusive, hence the +1. */
#define resource_len(r) (((r)->end - (r)->start) + 1)
2670 static int __devexit
isp1362_remove(struct platform_device
*pdev
)
2672 struct usb_hcd
*hcd
= platform_get_drvdata(pdev
);
2673 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2674 struct resource
*res
;
2676 remove_debug_file(isp1362_hcd
);
2677 DBG(0, "%s: Removing HCD\n", __func__
);
2678 usb_remove_hcd(hcd
);
2680 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__
,
2681 isp1362_hcd
->data_reg
);
2682 iounmap(isp1362_hcd
->data_reg
);
2684 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__
,
2685 isp1362_hcd
->addr_reg
);
2686 iounmap(isp1362_hcd
->addr_reg
);
2688 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
2689 DBG(0, "%s: release mem_region: %08lx\n", __func__
, (long unsigned int)res
->start
);
2691 release_mem_region(res
->start
, resource_len(res
));
2693 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2694 DBG(0, "%s: release mem_region: %08lx\n", __func__
, (long unsigned int)res
->start
);
2696 release_mem_region(res
->start
, resource_len(res
));
2698 DBG(0, "%s: put_hcd\n", __func__
);
2700 DBG(0, "%s: Done\n", __func__
);
/*
 * Platform-device probe: validate the three board-supplied resources
 * (data window, address window, IRQ), map the register windows, allocate
 * the HCD, initialize driver state, register with the USB core and create
 * the /proc debug entry. Cleanup uses the classic goto-ladder.
 *
 * NOTE(review): reconstructed from a fragmented extraction; error labels,
 * return paths and #ifdef framing restored from the upstream driver.
 * NOTE(review): in the CONFIG_USB_HCD_DMA branch, isp1362_hcd is written
 * before usb_create_hcd() has assigned it — a known upstream defect in this
 * (normally unbuilt) configuration; confirm before enabling that option.
 */
static int __init isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *addr, *data;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;

	/* basic sanity checks first. board-specific init logic should
	 * have initialized this the three resources and probably board
	 * specific platform_data. we don't probe for IRQs, and do only
	 * minimal sanity checking.
	 */
	if (pdev->num_resources < 3) {
		retval = -ENODEV;
		goto err1;
	}

	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq = platform_get_irq(pdev, 0);
	if (!addr || !data || irq < 0) {
		retval = -ENODEV;
		goto err1;
	}

#ifdef CONFIG_USB_HCD_DMA
	if (pdev->dev.dma_mask) {
		struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);

		if (!dma_res) {
			retval = -ENODEV;
			goto err1;
		}
		isp1362_hcd->data_dma = dma_res->start;
		isp1362_hcd->max_dma_size = resource_len(dma_res);
	}
#else
	if (pdev->dev.dma_mask) {
		DBG(1, "won't do DMA");
		retval = -ENODEV;
		goto err1;
	}
#endif

	if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
		retval = -EBUSY;
		goto err1;
	}
	addr_reg = ioremap(addr->start, resource_len(addr));
	if (addr_reg == NULL) {
		retval = -ENOMEM;
		goto err2;
	}

	if (!request_mem_region(data->start, resource_len(data), hcd_name)) {
		retval = -EBUSY;
		goto err3;
	}
	data_reg = ioremap(data->start, resource_len(data));
	if (data_reg == NULL) {
		retval = -ENOMEM;
		goto err4;
	}

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err5;
	}
	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = pdev->dev.platform_data;
#if USE_PLATFORM_DELAY
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err6;
	}
#endif

#ifdef CONFIG_ARM
	if (isp1362_hcd->board)
		set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING);
#endif

	retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED);
	if (retval != 0)
		goto err6;
	pr_info("%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

 err6:
	DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
	usb_put_hcd(hcd);
 err5:
	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
	iounmap(data_reg);
 err4:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
	release_mem_region(data->start, resource_len(data));
 err3:
	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
	iounmap(addr_reg);
 err2:
	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
	release_mem_region(addr->start, resource_len(addr));
 err1:
	pr_err("%s: init error, %d\n", __func__, retval);

	return retval;
}
/*
 * Platform PM suspend: for a freeze event, suspend the root hub through
 * the USB core; otherwise just power down the root-hub ports directly.
 * The device's recorded power state is updated only on success.
 */
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event == PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	} else {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	}
	if (retval == 0)
		pdev->dev.power.power_state = state;
	return retval;
}
/*
 * Platform PM resume: if we only powered down the root-hub ports at
 * suspend time, re-power them and return; otherwise perform a full bus
 * resume through the USB core.
 */
static int isp1362_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(0, "%s: Resuming\n", __func__);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		DBG(0, "%s: Resume RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return 0;
	}

	pdev->dev.power.power_state = PMSG_ON;

	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
}
/* Without CONFIG_PM the platform_driver gets no suspend/resume hooks. */
#define isp1362_suspend	NULL
#define isp1362_resume	NULL
/* Platform-bus glue; matched by name against the board's platform device. */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = __devexit_p(isp1362_remove),

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
2894 /*-------------------------------------------------------------------------*/
/*
 * Module init: bail out when USB support is administratively disabled,
 * otherwise register the platform driver (probe runs on device match).
 */
static int __init isp1362_init(void)
{
	if (usb_disabled())
		return -ENODEV;
	pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
	return platform_driver_register(&isp1362_driver);
}
module_init(isp1362_init);
/* Module exit: unregister the platform driver (triggers ->remove). */
static void __exit isp1362_cleanup(void)
{
	platform_driver_unregister(&isp1362_driver);
}
module_exit(isp1362_cleanup);