2 * ISP1362 HCD (Host Controller Driver) for USB.
4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
6 * Derived from the SL811 HCD, rewritten for ISP116x.
7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11 * Copyright (C) 2004 David Brownell
15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
16 * accesses to the address and data register.
17 * The following timing options exist:
19 * 1. Configure your memory controller to add such delays if it can (the best)
20 * 2. Implement platform-specific delay function possibly
21 * combined with configuring the memory controller; see
22 * include/linux/usb_isp1362.h for more info.
23 * 3. Use ndelay (easiest, poorest).
25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26 * platform specific section of isp1362.h to select the appropriate variant.
28 * Also note that according to the Philips "ISP1362 Errata" document
29 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
30 * is reasserted (even with #CS deasserted) within 132ns after a
31 * write cycle to any controller register. If the hardware doesn't
32 * implement the recommended fix (gating the #WR with #CS) software
33 * must ensure that no further write cycle (not necessarily to the chip!)
34 * is issued by the CPU within this interval.
36 * For PXA25x this can be ensured by using VLIO with the maximum
37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
43 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
44 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
45 * requests are carried out in separate frames. This will delay any SETUP
46 * packets until the start of the next frame so that this situation is
47 * unlikely to occur (and makes usbtest happy running with a PXA255 target
50 #undef BUGGY_PXA2XX_UDC_USBTEST
57 /* This enables a memory test on the ISP1362 chip memory to make sure the
58 * chip access timing is correct.
60 #undef CHIP_BUFFER_TEST
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/kernel.h>
65 #include <linux/delay.h>
66 #include <linux/ioport.h>
67 #include <linux/sched.h>
68 #include <linux/slab.h>
69 #include <linux/errno.h>
70 #include <linux/list.h>
71 #include <linux/interrupt.h>
72 #include <linux/usb.h>
73 #include <linux/usb/isp1362.h>
74 #include <linux/usb/hcd.h>
75 #include <linux/platform_device.h>
78 #include <linux/bitmap.h>
79 #include <linux/prefetch.h>
80 #include <linux/debugfs.h>
81 #include <linux/seq_file.h>
84 #include <asm/byteorder.h>
85 #include <asm/unaligned.h>
89 module_param(dbg_level
, int, 0644);
91 module_param(dbg_level
, int, 0);
94 #include "../core/usb.h"
98 #define DRIVER_VERSION "2005-04-04"
99 #define DRIVER_DESC "ISP1362 USB Host Controller Driver"
101 MODULE_DESCRIPTION(DRIVER_DESC
);
102 MODULE_LICENSE("GPL");
104 static const char hcd_name
[] = "isp1362-hcd";
106 static void isp1362_hc_stop(struct usb_hcd
*hcd
);
107 static int isp1362_hc_start(struct usb_hcd
*hcd
);
109 /*-------------------------------------------------------------------------*/
112 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
113 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
115 * We don't need a 'disable' counterpart, since interrupts will be disabled
116 * only by the interrupt handler.
118 static inline void isp1362_enable_int(struct isp1362_hcd
*isp1362_hcd
, u16 mask
)
120 if ((isp1362_hcd
->irqenb
| mask
) == isp1362_hcd
->irqenb
)
122 if (mask
& ~isp1362_hcd
->irqenb
)
123 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, mask
& ~isp1362_hcd
->irqenb
);
124 isp1362_hcd
->irqenb
|= mask
;
125 if (isp1362_hcd
->irq_active
)
127 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
130 /*-------------------------------------------------------------------------*/
132 static inline struct isp1362_ep_queue
*get_ptd_queue(struct isp1362_hcd
*isp1362_hcd
,
135 struct isp1362_ep_queue
*epq
= NULL
;
137 if (offset
< isp1362_hcd
->istl_queue
[1].buf_start
)
138 epq
= &isp1362_hcd
->istl_queue
[0];
139 else if (offset
< isp1362_hcd
->intl_queue
.buf_start
)
140 epq
= &isp1362_hcd
->istl_queue
[1];
141 else if (offset
< isp1362_hcd
->atl_queue
.buf_start
)
142 epq
= &isp1362_hcd
->intl_queue
;
143 else if (offset
< isp1362_hcd
->atl_queue
.buf_start
+
144 isp1362_hcd
->atl_queue
.buf_size
)
145 epq
= &isp1362_hcd
->atl_queue
;
148 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__
, offset
, epq
->name
);
150 pr_warning("%s: invalid PTD $%04x\n", __func__
, offset
);
155 static inline int get_ptd_offset(struct isp1362_ep_queue
*epq
, u8 index
)
159 if (index
* epq
->blk_size
> epq
->buf_size
) {
160 pr_warning("%s: Bad %s index %d(%d)\n", __func__
, epq
->name
, index
,
161 epq
->buf_size
/ epq
->blk_size
);
164 offset
= epq
->buf_start
+ index
* epq
->blk_size
;
165 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__
, epq
->name
, index
, offset
);
170 /*-------------------------------------------------------------------------*/
172 static inline u16
max_transfer_size(struct isp1362_ep_queue
*epq
, size_t size
,
175 u16 xfer_size
= min_t(size_t, MAX_XFER_SIZE
, size
);
177 xfer_size
= min_t(size_t, xfer_size
, epq
->buf_avail
* epq
->blk_size
- PTD_HEADER_SIZE
);
178 if (xfer_size
< size
&& xfer_size
% mps
)
179 xfer_size
-= xfer_size
% mps
;
184 static int claim_ptd_buffers(struct isp1362_ep_queue
*epq
,
185 struct isp1362_ep
*ep
, u16 len
)
187 int ptd_offset
= -EINVAL
;
188 int num_ptds
= ((len
+ PTD_HEADER_SIZE
- 1) / epq
->blk_size
) + 1;
191 BUG_ON(len
> epq
->buf_size
);
197 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__
,
198 epq
->name
, len
, epq
->blk_size
, num_ptds
, epq
->buf_map
, epq
->skip_map
);
199 BUG_ON(ep
->num_ptds
!= 0);
201 found
= bitmap_find_next_zero_area(&epq
->buf_map
, epq
->buf_count
, 0,
203 if (found
>= epq
->buf_count
)
206 DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__
,
207 num_ptds
, found
, len
, (int)(epq
->blk_size
- PTD_HEADER_SIZE
));
208 ptd_offset
= get_ptd_offset(epq
, found
);
209 WARN_ON(ptd_offset
< 0);
210 ep
->ptd_offset
= ptd_offset
;
211 ep
->num_ptds
+= num_ptds
;
212 epq
->buf_avail
-= num_ptds
;
213 BUG_ON(epq
->buf_avail
> epq
->buf_count
);
214 ep
->ptd_index
= found
;
215 bitmap_set(&epq
->buf_map
, found
, num_ptds
);
216 DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
217 __func__
, epq
->name
, ep
->ptd_index
, ep
->ptd_offset
,
218 epq
->buf_avail
, epq
->buf_count
, num_ptds
, epq
->buf_map
, epq
->skip_map
);
223 static inline void release_ptd_buffers(struct isp1362_ep_queue
*epq
, struct isp1362_ep
*ep
)
225 int last
= ep
->ptd_index
+ ep
->num_ptds
;
227 if (last
> epq
->buf_count
)
228 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
229 __func__
, ep
, ep
->num_req
, ep
->length
, epq
->name
, ep
->ptd_index
,
230 ep
->ptd_offset
, ep
->num_ptds
, epq
->buf_count
, epq
->buf_avail
,
231 epq
->buf_map
, epq
->skip_map
);
232 BUG_ON(last
> epq
->buf_count
);
234 bitmap_clear(&epq
->buf_map
, ep
->ptd_index
, ep
->num_ptds
);
235 bitmap_set(&epq
->skip_map
, ep
->ptd_index
, ep
->num_ptds
);
236 epq
->buf_avail
+= ep
->num_ptds
;
239 BUG_ON(epq
->buf_avail
> epq
->buf_count
);
240 BUG_ON(epq
->ptd_count
> epq
->buf_count
);
242 DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
244 ep
->ptd_offset
, ep
->num_ptds
, epq
->buf_avail
, epq
->buf_count
);
245 DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__
,
246 epq
->buf_map
, epq
->skip_map
);
249 ep
->ptd_offset
= -EINVAL
;
250 ep
->ptd_index
= -EINVAL
;
253 /*-------------------------------------------------------------------------*/
258 static void prepare_ptd(struct isp1362_hcd
*isp1362_hcd
, struct urb
*urb
,
259 struct isp1362_ep
*ep
, struct isp1362_ep_queue
*epq
,
266 size_t buf_len
= urb
->transfer_buffer_length
- urb
->actual_length
;
268 DBG(3, "%s: %s ep %p\n", __func__
, epq
->name
, ep
);
272 ep
->data
= (unsigned char *)urb
->transfer_buffer
+ urb
->actual_length
;
274 switch (ep
->nextpid
) {
276 toggle
= usb_gettoggle(urb
->dev
, ep
->epnum
, 0);
278 if (usb_pipecontrol(urb
->pipe
)) {
279 len
= min_t(size_t, ep
->maxpacket
, buf_len
);
280 } else if (usb_pipeisoc(urb
->pipe
)) {
281 len
= min_t(size_t, urb
->iso_frame_desc
[fno
].length
, MAX_XFER_SIZE
);
282 ep
->data
= urb
->transfer_buffer
+ urb
->iso_frame_desc
[fno
].offset
;
284 len
= max_transfer_size(epq
, buf_len
, ep
->maxpacket
);
285 DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__
, len
, ep
->maxpacket
,
289 toggle
= usb_gettoggle(urb
->dev
, ep
->epnum
, 1);
291 if (usb_pipecontrol(urb
->pipe
))
292 len
= min_t(size_t, ep
->maxpacket
, buf_len
);
293 else if (usb_pipeisoc(urb
->pipe
))
294 len
= min_t(size_t, urb
->iso_frame_desc
[0].length
, MAX_XFER_SIZE
);
296 len
= max_transfer_size(epq
, buf_len
, ep
->maxpacket
);
298 pr_info("%s: Sending ZERO packet: %d\n", __func__
,
299 urb
->transfer_flags
& URB_ZERO_PACKET
);
300 DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__
, len
, ep
->maxpacket
,
306 len
= sizeof(struct usb_ctrlrequest
);
307 DBG(1, "%s: SETUP len %d\n", __func__
, len
);
308 ep
->data
= urb
->setup_packet
;
313 dir
= (urb
->transfer_buffer_length
&& usb_pipein(urb
->pipe
)) ?
314 PTD_DIR_OUT
: PTD_DIR_IN
;
315 DBG(1, "%s: ACK len %d\n", __func__
, len
);
318 toggle
= dir
= len
= 0;
319 pr_err("%s@%d: ep->nextpid %02x\n", __func__
, __LINE__
, ep
->nextpid
);
327 ptd
->count
= PTD_CC_MSK
| PTD_ACTIVE_MSK
| PTD_TOGGLE(toggle
);
328 ptd
->mps
= PTD_MPS(ep
->maxpacket
) | PTD_SPD(urb
->dev
->speed
== USB_SPEED_LOW
) |
330 ptd
->len
= PTD_LEN(len
) | PTD_DIR(dir
);
331 ptd
->faddr
= PTD_FA(usb_pipedevice(urb
->pipe
));
333 if (usb_pipeint(urb
->pipe
)) {
334 ptd
->faddr
|= PTD_SF_INT(ep
->branch
);
335 ptd
->faddr
|= PTD_PR(ep
->interval
? __ffs(ep
->interval
) : 0);
337 if (usb_pipeisoc(urb
->pipe
))
338 ptd
->faddr
|= PTD_SF_ISO(fno
);
340 DBG(1, "%s: Finished\n", __func__
);
343 static void isp1362_write_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
344 struct isp1362_ep_queue
*epq
)
346 struct ptd
*ptd
= &ep
->ptd
;
347 int len
= PTD_GET_DIR(ptd
) == PTD_DIR_IN
? 0 : ep
->length
;
350 isp1362_write_buffer(isp1362_hcd
, ptd
, ep
->ptd_offset
, PTD_HEADER_SIZE
);
352 isp1362_write_buffer(isp1362_hcd
, ep
->data
,
353 ep
->ptd_offset
+ PTD_HEADER_SIZE
, len
);
356 dump_ptd_out_data(ptd
, ep
->data
);
359 static void isp1362_read_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
360 struct isp1362_ep_queue
*epq
)
362 struct ptd
*ptd
= &ep
->ptd
;
365 WARN_ON(list_empty(&ep
->active
));
366 BUG_ON(ep
->ptd_offset
< 0);
368 list_del_init(&ep
->active
);
369 DBG(1, "%s: ep %p removed from active list %p\n", __func__
, ep
, &epq
->active
);
372 isp1362_read_buffer(isp1362_hcd
, ptd
, ep
->ptd_offset
, PTD_HEADER_SIZE
);
374 act_len
= PTD_GET_COUNT(ptd
);
375 if (PTD_GET_DIR(ptd
) != PTD_DIR_IN
|| act_len
== 0)
377 if (act_len
> ep
->length
)
378 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__
, ep
,
379 ep
->ptd_offset
, act_len
, ep
->length
);
380 BUG_ON(act_len
> ep
->length
);
381 /* Only transfer the amount of data that has actually been overwritten
382 * in the chip buffer. We don't want any data that doesn't belong to the
383 * transfer to leak out of the chip to the callers transfer buffer!
386 isp1362_read_buffer(isp1362_hcd
, ep
->data
,
387 ep
->ptd_offset
+ PTD_HEADER_SIZE
, act_len
);
388 dump_ptd_in_data(ptd
, ep
->data
);
392 * INT PTDs will stay in the chip until data is available.
393 * This function will remove a PTD from the chip when the URB is dequeued.
394 * Must be called with the spinlock held and IRQs disabled
396 static void remove_ptd(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
)
400 struct isp1362_ep_queue
*epq
;
402 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__
, ep
, ep
->ptd_index
, ep
->ptd_offset
);
403 BUG_ON(ep
->ptd_offset
< 0);
405 epq
= get_ptd_queue(isp1362_hcd
, ep
->ptd_offset
);
408 /* put ep in remove_list for cleanup */
409 WARN_ON(!list_empty(&ep
->remove_list
));
410 list_add_tail(&ep
->remove_list
, &isp1362_hcd
->remove_list
);
411 /* let SOF interrupt handle the cleanup */
412 isp1362_enable_int(isp1362_hcd
, HCuPINT_SOF
);
414 index
= ep
->ptd_index
;
416 /* ISO queues don't have SKIP registers */
419 DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__
,
420 index
, ep
->ptd_offset
, epq
->skip_map
, 1 << index
);
422 /* prevent further processing of PTD (will be effective after next SOF) */
423 epq
->skip_map
|= 1 << index
;
424 if (epq
== &isp1362_hcd
->atl_queue
) {
425 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__
,
426 isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
), epq
->skip_map
);
427 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, epq
->skip_map
);
428 if (~epq
->skip_map
== 0)
429 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
430 } else if (epq
== &isp1362_hcd
->intl_queue
) {
431 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__
,
432 isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
), epq
->skip_map
);
433 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, epq
->skip_map
);
434 if (~epq
->skip_map
== 0)
435 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
440 Take done or failed requests out of schedule. Give back
443 static void finish_request(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
,
444 struct urb
*urb
, int status
)
445 __releases(isp1362_hcd
->lock
)
446 __acquires(isp1362_hcd
->lock
)
451 if (usb_pipecontrol(urb
->pipe
))
452 ep
->nextpid
= USB_PID_SETUP
;
454 URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__
,
455 ep
->num_req
, usb_pipedevice(urb
->pipe
),
456 usb_pipeendpoint(urb
->pipe
),
457 !usb_pipein(urb
->pipe
) ? "out" : "in",
458 usb_pipecontrol(urb
->pipe
) ? "ctrl" :
459 usb_pipeint(urb
->pipe
) ? "int" :
460 usb_pipebulk(urb
->pipe
) ? "bulk" :
462 urb
->actual_length
, urb
->transfer_buffer_length
,
463 !(urb
->transfer_flags
& URB_SHORT_NOT_OK
) ?
464 "short_ok" : "", urb
->status
);
467 usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd
), urb
);
468 spin_unlock(&isp1362_hcd
->lock
);
469 usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd
), urb
, status
);
470 spin_lock(&isp1362_hcd
->lock
);
472 /* take idle endpoints out of the schedule right away */
473 if (!list_empty(&ep
->hep
->urb_list
))
476 /* async deschedule */
477 if (!list_empty(&ep
->schedule
)) {
478 list_del_init(&ep
->schedule
);
484 /* periodic deschedule */
485 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep
->interval
,
486 ep
, ep
->branch
, ep
->load
,
487 isp1362_hcd
->load
[ep
->branch
],
488 isp1362_hcd
->load
[ep
->branch
] - ep
->load
);
489 isp1362_hcd
->load
[ep
->branch
] -= ep
->load
;
490 ep
->branch
= PERIODIC_SIZE
;
495 * Analyze transfer results, handle partial transfers and errors
497 static void postproc_ep(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep
*ep
)
499 struct urb
*urb
= get_urb(ep
);
500 struct usb_device
*udev
;
504 int urbstat
= -EINPROGRESS
;
507 DBG(2, "%s: ep %p req %d\n", __func__
, ep
, ep
->num_req
);
511 cc
= PTD_GET_CC(ptd
);
512 if (cc
== PTD_NOTACCESSED
) {
513 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__
,
518 short_ok
= !(urb
->transfer_flags
& URB_SHORT_NOT_OK
);
519 len
= urb
->transfer_buffer_length
- urb
->actual_length
;
521 /* Data underrun is special. For allowed underrun
522 we clear the error and continue as normal. For
523 forbidden underrun we finish the DATA stage
524 immediately while for control transfer,
525 we do a STATUS stage.
527 if (cc
== PTD_DATAUNDERRUN
) {
529 DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
530 __func__
, ep
->num_req
, short_ok
? "" : "not_",
531 PTD_GET_COUNT(ptd
), ep
->maxpacket
, len
);
535 DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
536 __func__
, ep
->num_req
,
537 usb_pipein(urb
->pipe
) ? "IN" : "OUT", ep
->nextpid
,
538 short_ok
? "" : "not_",
539 PTD_GET_COUNT(ptd
), ep
->maxpacket
, len
);
540 /* save the data underrun error code for later and
541 * proceed with the status stage
543 urb
->actual_length
+= PTD_GET_COUNT(ptd
);
544 if (usb_pipecontrol(urb
->pipe
)) {
545 ep
->nextpid
= USB_PID_ACK
;
546 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
548 if (urb
->status
== -EINPROGRESS
)
549 urb
->status
= cc_to_error
[PTD_DATAUNDERRUN
];
551 usb_settoggle(udev
, ep
->epnum
, ep
->nextpid
== USB_PID_OUT
,
552 PTD_GET_TOGGLE(ptd
));
553 urbstat
= cc_to_error
[PTD_DATAUNDERRUN
];
559 if (cc
!= PTD_CC_NOERROR
) {
560 if (++ep
->error_count
>= 3 || cc
== PTD_CC_STALL
|| cc
== PTD_DATAOVERRUN
) {
561 urbstat
= cc_to_error
[cc
];
562 DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
563 __func__
, ep
->num_req
, ep
->nextpid
, urbstat
, cc
,
569 switch (ep
->nextpid
) {
571 if (PTD_GET_COUNT(ptd
) != ep
->length
)
572 pr_err("%s: count=%d len=%d\n", __func__
,
573 PTD_GET_COUNT(ptd
), ep
->length
);
574 BUG_ON(PTD_GET_COUNT(ptd
) != ep
->length
);
575 urb
->actual_length
+= ep
->length
;
576 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
577 usb_settoggle(udev
, ep
->epnum
, 1, PTD_GET_TOGGLE(ptd
));
578 if (urb
->actual_length
== urb
->transfer_buffer_length
) {
579 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__
,
580 ep
->num_req
, len
, ep
->maxpacket
, urbstat
);
581 if (usb_pipecontrol(urb
->pipe
)) {
582 DBG(3, "%s: req %d %s Wait for ACK\n", __func__
,
584 usb_pipein(urb
->pipe
) ? "IN" : "OUT");
585 ep
->nextpid
= USB_PID_ACK
;
587 if (len
% ep
->maxpacket
||
588 !(urb
->transfer_flags
& URB_ZERO_PACKET
)) {
590 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
591 __func__
, ep
->num_req
, usb_pipein(urb
->pipe
) ? "IN" : "OUT",
592 urbstat
, len
, ep
->maxpacket
, urb
->actual_length
);
598 len
= PTD_GET_COUNT(ptd
);
599 BUG_ON(len
> ep
->length
);
600 urb
->actual_length
+= len
;
601 BUG_ON(urb
->actual_length
> urb
->transfer_buffer_length
);
602 usb_settoggle(udev
, ep
->epnum
, 0, PTD_GET_TOGGLE(ptd
));
603 /* if transfer completed or (allowed) data underrun */
604 if ((urb
->transfer_buffer_length
== urb
->actual_length
) ||
605 len
% ep
->maxpacket
) {
606 DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__
,
607 ep
->num_req
, len
, ep
->maxpacket
, urbstat
);
608 if (usb_pipecontrol(urb
->pipe
)) {
609 DBG(3, "%s: req %d %s Wait for ACK\n", __func__
,
611 usb_pipein(urb
->pipe
) ? "IN" : "OUT");
612 ep
->nextpid
= USB_PID_ACK
;
615 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
616 __func__
, ep
->num_req
, usb_pipein(urb
->pipe
) ? "IN" : "OUT",
617 urbstat
, len
, ep
->maxpacket
, urb
->actual_length
);
622 if (urb
->transfer_buffer_length
== urb
->actual_length
) {
623 ep
->nextpid
= USB_PID_ACK
;
624 } else if (usb_pipeout(urb
->pipe
)) {
625 usb_settoggle(udev
, 0, 1, 1);
626 ep
->nextpid
= USB_PID_OUT
;
628 usb_settoggle(udev
, 0, 0, 1);
629 ep
->nextpid
= USB_PID_IN
;
633 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__
, ep
->num_req
,
635 WARN_ON(urbstat
!= -EINPROGRESS
);
644 if (urbstat
!= -EINPROGRESS
) {
645 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__
,
646 ep
, ep
->num_req
, urb
, urbstat
);
647 finish_request(isp1362_hcd
, ep
, urb
, urbstat
);
651 static void finish_unlinks(struct isp1362_hcd
*isp1362_hcd
)
653 struct isp1362_ep
*ep
;
654 struct isp1362_ep
*tmp
;
656 list_for_each_entry_safe(ep
, tmp
, &isp1362_hcd
->remove_list
, remove_list
) {
657 struct isp1362_ep_queue
*epq
=
658 get_ptd_queue(isp1362_hcd
, ep
->ptd_offset
);
659 int index
= ep
->ptd_index
;
663 DBG(1, "%s: remove PTD[%d] $%04x\n", __func__
, index
, ep
->ptd_offset
);
664 BUG_ON(ep
->num_ptds
== 0);
665 release_ptd_buffers(epq
, ep
);
667 if (!list_empty(&ep
->hep
->urb_list
)) {
668 struct urb
*urb
= get_urb(ep
);
670 DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__
,
672 finish_request(isp1362_hcd
, ep
, urb
, -ESHUTDOWN
);
674 WARN_ON(list_empty(&ep
->active
));
675 if (!list_empty(&ep
->active
)) {
676 list_del_init(&ep
->active
);
677 DBG(1, "%s: ep %p removed from active list\n", __func__
, ep
);
679 list_del_init(&ep
->remove_list
);
680 DBG(1, "%s: ep %p removed from remove_list\n", __func__
, ep
);
682 DBG(1, "%s: Done\n", __func__
);
685 static inline void enable_atl_transfers(struct isp1362_hcd
*isp1362_hcd
, int count
)
688 if (count
< isp1362_hcd
->atl_queue
.ptd_count
)
689 isp1362_write_reg16(isp1362_hcd
, HCATLDTC
, count
);
690 isp1362_enable_int(isp1362_hcd
, HCuPINT_ATL
);
691 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, isp1362_hcd
->atl_queue
.skip_map
);
692 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
694 isp1362_enable_int(isp1362_hcd
, HCuPINT_SOF
);
697 static inline void enable_intl_transfers(struct isp1362_hcd
*isp1362_hcd
)
699 isp1362_enable_int(isp1362_hcd
, HCuPINT_INTL
);
700 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
701 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, isp1362_hcd
->intl_queue
.skip_map
);
704 static inline void enable_istl_transfers(struct isp1362_hcd
*isp1362_hcd
, int flip
)
706 isp1362_enable_int(isp1362_hcd
, flip
? HCuPINT_ISTL1
: HCuPINT_ISTL0
);
707 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, flip
?
708 HCBUFSTAT_ISTL1_FULL
: HCBUFSTAT_ISTL0_FULL
);
711 static int submit_req(struct isp1362_hcd
*isp1362_hcd
, struct urb
*urb
,
712 struct isp1362_ep
*ep
, struct isp1362_ep_queue
*epq
)
714 int index
= epq
->free_ptd
;
716 prepare_ptd(isp1362_hcd
, urb
, ep
, epq
, 0);
717 index
= claim_ptd_buffers(epq
, ep
, ep
->length
);
718 if (index
== -ENOMEM
) {
719 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__
,
720 ep
->num_req
, epq
->name
, ep
->num_ptds
, epq
->buf_map
, epq
->skip_map
);
722 } else if (index
== -EOVERFLOW
) {
723 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
724 __func__
, ep
->num_req
, ep
->length
, epq
->name
, ep
->num_ptds
,
725 epq
->buf_map
, epq
->skip_map
);
729 list_add_tail(&ep
->active
, &epq
->active
);
730 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__
,
731 ep
, ep
->num_req
, ep
->length
, &epq
->active
);
732 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__
, epq
->name
,
733 ep
->ptd_offset
, ep
, ep
->num_req
);
734 isp1362_write_ptd(isp1362_hcd
, ep
, epq
);
735 __clear_bit(ep
->ptd_index
, &epq
->skip_map
);
740 static void start_atl_transfers(struct isp1362_hcd
*isp1362_hcd
)
743 struct isp1362_ep_queue
*epq
= &isp1362_hcd
->atl_queue
;
744 struct isp1362_ep
*ep
;
747 if (atomic_read(&epq
->finishing
)) {
748 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
752 list_for_each_entry(ep
, &isp1362_hcd
->async
, schedule
) {
753 struct urb
*urb
= get_urb(ep
);
756 if (!list_empty(&ep
->active
)) {
757 DBG(2, "%s: Skipping active %s ep %p\n", __func__
, epq
->name
, ep
);
761 DBG(1, "%s: Processing %s ep %p req %d\n", __func__
, epq
->name
,
764 ret
= submit_req(isp1362_hcd
, urb
, ep
, epq
);
765 if (ret
== -ENOMEM
) {
768 } else if (ret
== -EOVERFLOW
) {
772 #ifdef BUGGY_PXA2XX_UDC_USBTEST
773 defer
= ep
->nextpid
== USB_PID_SETUP
;
778 /* Avoid starving of endpoints */
779 if (isp1362_hcd
->async
.next
!= isp1362_hcd
->async
.prev
) {
780 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__
, ptd_count
);
781 list_move(&isp1362_hcd
->async
, isp1362_hcd
->async
.next
);
783 if (ptd_count
|| defer
)
784 enable_atl_transfers(isp1362_hcd
, defer
? 0 : ptd_count
);
786 epq
->ptd_count
+= ptd_count
;
787 if (epq
->ptd_count
> epq
->stat_maxptds
) {
788 epq
->stat_maxptds
= epq
->ptd_count
;
789 DBG(0, "%s: max_ptds: %d\n", __func__
, epq
->stat_maxptds
);
793 static void start_intl_transfers(struct isp1362_hcd
*isp1362_hcd
)
796 struct isp1362_ep_queue
*epq
= &isp1362_hcd
->intl_queue
;
797 struct isp1362_ep
*ep
;
799 if (atomic_read(&epq
->finishing
)) {
800 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
804 list_for_each_entry(ep
, &isp1362_hcd
->periodic
, schedule
) {
805 struct urb
*urb
= get_urb(ep
);
808 if (!list_empty(&ep
->active
)) {
809 DBG(1, "%s: Skipping active %s ep %p\n", __func__
,
814 DBG(1, "%s: Processing %s ep %p req %d\n", __func__
,
815 epq
->name
, ep
, ep
->num_req
);
816 ret
= submit_req(isp1362_hcd
, urb
, ep
, epq
);
819 else if (ret
== -EOVERFLOW
)
825 static int last_count
;
827 if (ptd_count
!= last_count
) {
828 DBG(0, "%s: ptd_count: %d\n", __func__
, ptd_count
);
829 last_count
= ptd_count
;
831 enable_intl_transfers(isp1362_hcd
);
834 epq
->ptd_count
+= ptd_count
;
835 if (epq
->ptd_count
> epq
->stat_maxptds
)
836 epq
->stat_maxptds
= epq
->ptd_count
;
839 static inline int next_ptd(struct isp1362_ep_queue
*epq
, struct isp1362_ep
*ep
)
841 u16 ptd_offset
= ep
->ptd_offset
;
842 int num_ptds
= (ep
->length
+ PTD_HEADER_SIZE
+ (epq
->blk_size
- 1)) / epq
->blk_size
;
844 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__
, ptd_offset
,
845 ep
->length
, num_ptds
, epq
->blk_size
, ptd_offset
+ num_ptds
* epq
->blk_size
);
847 ptd_offset
+= num_ptds
* epq
->blk_size
;
848 if (ptd_offset
< epq
->buf_start
+ epq
->buf_size
)
854 static void start_iso_transfers(struct isp1362_hcd
*isp1362_hcd
)
857 int flip
= isp1362_hcd
->istl_flip
;
858 struct isp1362_ep_queue
*epq
;
860 struct isp1362_ep
*ep
;
861 struct isp1362_ep
*tmp
;
862 u16 fno
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
865 epq
= &isp1362_hcd
->istl_queue
[flip
];
866 if (atomic_read(&epq
->finishing
)) {
867 DBG(1, "%s: finish_transfers is active for %s\n", __func__
, epq
->name
);
871 if (!list_empty(&epq
->active
))
874 ptd_offset
= epq
->buf_start
;
875 list_for_each_entry_safe(ep
, tmp
, &isp1362_hcd
->isoc
, schedule
) {
876 struct urb
*urb
= get_urb(ep
);
877 s16 diff
= fno
- (u16
)urb
->start_frame
;
879 DBG(1, "%s: Processing %s ep %p\n", __func__
, epq
->name
, ep
);
881 if (diff
> urb
->number_of_packets
) {
882 /* time frame for this URB has elapsed */
883 finish_request(isp1362_hcd
, ep
, urb
, -EOVERFLOW
);
885 } else if (diff
< -1) {
886 /* URB is not due in this frame or the next one.
887 * Comparing with '-1' instead of '0' accounts for double
888 * buffering in the ISP1362 which enables us to queue the PTD
889 * one frame ahead of time
891 } else if (diff
== -1) {
892 /* submit PTD's that are due in the next frame */
893 prepare_ptd(isp1362_hcd
, urb
, ep
, epq
, fno
);
894 if (ptd_offset
+ PTD_HEADER_SIZE
+ ep
->length
>
895 epq
->buf_start
+ epq
->buf_size
) {
896 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
897 __func__
, ep
->length
);
900 ep
->ptd_offset
= ptd_offset
;
901 list_add_tail(&ep
->active
, &epq
->active
);
903 ptd_offset
= next_ptd(epq
, ep
);
904 if (ptd_offset
< 0) {
905 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__
,
906 ep
->num_req
, epq
->name
);
911 list_for_each_entry(ep
, &epq
->active
, active
) {
912 if (epq
->active
.next
== &ep
->active
)
913 ep
->ptd
.mps
|= PTD_LAST_MSK
;
914 isp1362_write_ptd(isp1362_hcd
, ep
, epq
);
919 enable_istl_transfers(isp1362_hcd
, flip
);
921 epq
->ptd_count
+= ptd_count
;
922 if (epq
->ptd_count
> epq
->stat_maxptds
)
923 epq
->stat_maxptds
= epq
->ptd_count
;
925 /* check, whether the second ISTL buffer may also be filled */
926 if (!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
927 (flip
? HCBUFSTAT_ISTL0_FULL
: HCBUFSTAT_ISTL1_FULL
))) {
935 static void finish_transfers(struct isp1362_hcd
*isp1362_hcd
, unsigned long done_map
,
936 struct isp1362_ep_queue
*epq
)
938 struct isp1362_ep
*ep
;
939 struct isp1362_ep
*tmp
;
941 if (list_empty(&epq
->active
)) {
942 DBG(1, "%s: Nothing to do for %s queue\n", __func__
, epq
->name
);
946 DBG(1, "%s: Finishing %s transfers %08lx\n", __func__
, epq
->name
, done_map
);
948 atomic_inc(&epq
->finishing
);
949 list_for_each_entry_safe(ep
, tmp
, &epq
->active
, active
) {
950 int index
= ep
->ptd_index
;
952 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__
, epq
->name
,
953 index
, ep
->ptd_offset
);
956 if (__test_and_clear_bit(index
, &done_map
)) {
957 isp1362_read_ptd(isp1362_hcd
, ep
, epq
);
958 epq
->free_ptd
= index
;
959 BUG_ON(ep
->num_ptds
== 0);
960 release_ptd_buffers(epq
, ep
);
962 DBG(1, "%s: ep %p req %d removed from active list\n", __func__
,
964 if (!list_empty(&ep
->remove_list
)) {
965 list_del_init(&ep
->remove_list
);
966 DBG(1, "%s: ep %p removed from remove list\n", __func__
, ep
);
968 DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__
, epq
->name
,
970 postproc_ep(isp1362_hcd
, ep
);
976 pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__
, done_map
,
978 atomic_dec(&epq
->finishing
);
981 static void finish_iso_transfers(struct isp1362_hcd
*isp1362_hcd
, struct isp1362_ep_queue
*epq
)
983 struct isp1362_ep
*ep
;
984 struct isp1362_ep
*tmp
;
986 if (list_empty(&epq
->active
)) {
987 DBG(1, "%s: Nothing to do for %s queue\n", __func__
, epq
->name
);
991 DBG(1, "%s: Finishing %s transfers\n", __func__
, epq
->name
);
993 atomic_inc(&epq
->finishing
);
994 list_for_each_entry_safe(ep
, tmp
, &epq
->active
, active
) {
995 DBG(1, "%s: Checking PTD $%04x\n", __func__
, ep
->ptd_offset
);
997 isp1362_read_ptd(isp1362_hcd
, ep
, epq
);
998 DBG(1, "%s: Postprocessing %s ep %p\n", __func__
, epq
->name
, ep
);
999 postproc_ep(isp1362_hcd
, ep
);
1001 WARN_ON(epq
->blk_size
!= 0);
1002 atomic_dec(&epq
->finishing
);
1005 static irqreturn_t
isp1362_irq(struct usb_hcd
*hcd
)
1008 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1012 spin_lock(&isp1362_hcd
->lock
);
1014 BUG_ON(isp1362_hcd
->irq_active
++);
1016 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
1018 irqstat
= isp1362_read_reg16(isp1362_hcd
, HCuPINT
);
1019 DBG(3, "%s: got IRQ %04x:%04x\n", __func__
, irqstat
, isp1362_hcd
->irqenb
);
1021 /* only handle interrupts that are currently enabled */
1022 irqstat
&= isp1362_hcd
->irqenb
;
1023 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, irqstat
);
1026 if (irqstat
& HCuPINT_SOF
) {
1027 isp1362_hcd
->irqenb
&= ~HCuPINT_SOF
;
1028 isp1362_hcd
->irq_stat
[ISP1362_INT_SOF
]++;
1030 svc_mask
&= ~HCuPINT_SOF
;
1031 DBG(3, "%s: SOF\n", __func__
);
1032 isp1362_hcd
->fmindex
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1033 if (!list_empty(&isp1362_hcd
->remove_list
))
1034 finish_unlinks(isp1362_hcd
);
1035 if (!list_empty(&isp1362_hcd
->async
) && !(irqstat
& HCuPINT_ATL
)) {
1036 if (list_empty(&isp1362_hcd
->atl_queue
.active
)) {
1037 start_atl_transfers(isp1362_hcd
);
1039 isp1362_enable_int(isp1362_hcd
, HCuPINT_ATL
);
1040 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
,
1041 isp1362_hcd
->atl_queue
.skip_map
);
1042 isp1362_set_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
1047 if (irqstat
& HCuPINT_ISTL0
) {
1048 isp1362_hcd
->irq_stat
[ISP1362_INT_ISTL0
]++;
1050 svc_mask
&= ~HCuPINT_ISTL0
;
1051 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ISTL0_FULL
);
1052 DBG(1, "%s: ISTL0\n", __func__
);
1053 WARN_ON((int)!!isp1362_hcd
->istl_flip
);
1054 WARN_ON(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1055 HCBUFSTAT_ISTL0_ACTIVE
);
1056 WARN_ON(!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1057 HCBUFSTAT_ISTL0_DONE
));
1058 isp1362_hcd
->irqenb
&= ~HCuPINT_ISTL0
;
1061 if (irqstat
& HCuPINT_ISTL1
) {
1062 isp1362_hcd
->irq_stat
[ISP1362_INT_ISTL1
]++;
1064 svc_mask
&= ~HCuPINT_ISTL1
;
1065 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ISTL1_FULL
);
1066 DBG(1, "%s: ISTL1\n", __func__
);
1067 WARN_ON(!(int)isp1362_hcd
->istl_flip
);
1068 WARN_ON(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1069 HCBUFSTAT_ISTL1_ACTIVE
);
1070 WARN_ON(!(isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
) &
1071 HCBUFSTAT_ISTL1_DONE
));
1072 isp1362_hcd
->irqenb
&= ~HCuPINT_ISTL1
;
1075 if (irqstat
& (HCuPINT_ISTL0
| HCuPINT_ISTL1
)) {
1076 WARN_ON((irqstat
& (HCuPINT_ISTL0
| HCuPINT_ISTL1
)) ==
1077 (HCuPINT_ISTL0
| HCuPINT_ISTL1
));
1078 finish_iso_transfers(isp1362_hcd
,
1079 &isp1362_hcd
->istl_queue
[isp1362_hcd
->istl_flip
]);
1080 start_iso_transfers(isp1362_hcd
);
1081 isp1362_hcd
->istl_flip
= 1 - isp1362_hcd
->istl_flip
;
1084 if (irqstat
& HCuPINT_INTL
) {
1085 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
);
1086 u32 skip_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
);
1087 isp1362_hcd
->irq_stat
[ISP1362_INT_INTL
]++;
1089 DBG(2, "%s: INTL\n", __func__
);
1091 svc_mask
&= ~HCuPINT_INTL
;
1093 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, skip_map
| done_map
);
1094 if (~(done_map
| skip_map
) == 0)
1095 /* All PTDs are finished, disable INTL processing entirely */
1096 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_INTL_ACTIVE
);
1101 DBG(3, "%s: INTL done_map %08x\n", __func__
, done_map
);
1102 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->intl_queue
);
1103 start_intl_transfers(isp1362_hcd
);
1107 if (irqstat
& HCuPINT_ATL
) {
1108 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCATLDONE
);
1109 u32 skip_map
= isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
);
1110 isp1362_hcd
->irq_stat
[ISP1362_INT_ATL
]++;
1112 DBG(2, "%s: ATL\n", __func__
);
1114 svc_mask
&= ~HCuPINT_ATL
;
1116 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, skip_map
| done_map
);
1117 if (~(done_map
| skip_map
) == 0)
1118 isp1362_clr_mask16(isp1362_hcd
, HCBUFSTAT
, HCBUFSTAT_ATL_ACTIVE
);
1120 DBG(3, "%s: ATL done_map %08x\n", __func__
, done_map
);
1121 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->atl_queue
);
1122 start_atl_transfers(isp1362_hcd
);
1127 if (irqstat
& HCuPINT_OPR
) {
1128 u32 intstat
= isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
);
1129 isp1362_hcd
->irq_stat
[ISP1362_INT_OPR
]++;
1131 svc_mask
&= ~HCuPINT_OPR
;
1132 DBG(2, "%s: OPR %08x:%08x\n", __func__
, intstat
, isp1362_hcd
->intenb
);
1133 intstat
&= isp1362_hcd
->intenb
;
1134 if (intstat
& OHCI_INTR_UE
) {
1135 pr_err("Unrecoverable error\n");
1136 /* FIXME: do here reset or cleanup or whatever */
1138 if (intstat
& OHCI_INTR_RHSC
) {
1139 isp1362_hcd
->rhstatus
= isp1362_read_reg32(isp1362_hcd
, HCRHSTATUS
);
1140 isp1362_hcd
->rhport
[0] = isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
);
1141 isp1362_hcd
->rhport
[1] = isp1362_read_reg32(isp1362_hcd
, HCRHPORT2
);
1143 if (intstat
& OHCI_INTR_RD
) {
1144 pr_info("%s: RESUME DETECTED\n", __func__
);
1145 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1146 usb_hcd_resume_root_hub(hcd
);
1148 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
, intstat
);
1149 irqstat
&= ~HCuPINT_OPR
;
1153 if (irqstat
& HCuPINT_SUSP
) {
1154 isp1362_hcd
->irq_stat
[ISP1362_INT_SUSP
]++;
1156 svc_mask
&= ~HCuPINT_SUSP
;
1158 pr_info("%s: SUSPEND IRQ\n", __func__
);
1161 if (irqstat
& HCuPINT_CLKRDY
) {
1162 isp1362_hcd
->irq_stat
[ISP1362_INT_CLKRDY
]++;
1164 isp1362_hcd
->irqenb
&= ~HCuPINT_CLKRDY
;
1165 svc_mask
&= ~HCuPINT_CLKRDY
;
1166 pr_info("%s: CLKRDY IRQ\n", __func__
);
1170 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__
, svc_mask
);
1172 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
1173 isp1362_hcd
->irq_active
--;
1174 spin_unlock(&isp1362_hcd
->lock
);
1176 return IRQ_RETVAL(handled
);
1179 /*-------------------------------------------------------------------------*/
1181 #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
/*
 * balance() - pick the least-loaded periodic schedule branch for @interval.
 * Scans branches 0..interval-1 of isp1362_hcd->load[] and, for the best
 * candidate, walks every slot j = i, i+interval, ... < PERIODIC_SIZE checking
 * that adding @load keeps each slot under MAX_PERIODIC_LOAD (900/1000 usec).
 * Returns the branch index on success, -ENOSPC if none fits (initial value
 * of 'branch').
 *
 * NOTE(review): this chunk is a corrupted extraction — stray original line
 * numbers are fused into the text, statements are split across lines, and
 * several original lines (opening brace, declaration of 'j', the tail of the
 * function including its return) are missing. Restore from the pristine
 * source; comments only added here.
 */
1182 static int balance(struct isp1362_hcd
*isp1362_hcd
, u16 interval
, u16 load
)
/* branch starts at -ENOSPC so the error is returned if no slot fits */
1184 int i
, branch
= -ENOSPC
;
1186 /* search for the least loaded schedule branch of that interval
1187 * which has enough bandwidth left unreserved.
1189 for (i
= 0; i
< interval
; i
++) {
1190 if (branch
< 0 || isp1362_hcd
->load
[branch
] > isp1362_hcd
->load
[i
]) {
/* verify every slot of this branch stays under the bandwidth cap */
1193 for (j
= i
; j
< PERIODIC_SIZE
; j
+= interval
) {
1194 if ((isp1362_hcd
->load
[j
] + load
) > MAX_PERIODIC_LOAD
) {
1195 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__
,
1196 load
, j
, isp1362_hcd
->load
[j
], MAX_PERIODIC_LOAD
);
/* NOTE(review): function tail (branch assignment, return) elided by extraction */
1200 if (j
< PERIODIC_SIZE
)
1208 /* NB! ALL the code above this point runs with isp1362_hcd->lock
1212 /*-------------------------------------------------------------------------*/
/*
 * isp1362_urb_enqueue() - HCD .urb_enqueue hook: queue an URB for transfer.
 * Rejects isochronous URBs, allocates an isp1362_ep on first use of an
 * endpoint, links the URB, picks a periodic branch via balance() for
 * INT/ISO endpoints, and kicks the relevant transfer engine
 * (ATL/INTL/ISTL). All list/schedule manipulation happens under
 * isp1362_hcd->lock with IRQs disabled.
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, and many elided original lines (error paths, switch
 * scaffolding, closing braces). Comments only added; recover code from
 * pristine source.
 */
1214 static int isp1362_urb_enqueue(struct usb_hcd
*hcd
,
1218 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1219 struct usb_device
*udev
= urb
->dev
;
1220 unsigned int pipe
= urb
->pipe
;
1221 int is_out
= !usb_pipein(pipe
);
1222 int type
= usb_pipetype(pipe
);
1223 int epnum
= usb_pipeendpoint(pipe
);
1224 struct usb_host_endpoint
*hep
= urb
->ep
;
1225 struct isp1362_ep
*ep
= NULL
;
1226 unsigned long flags
;
1229 DBG(3, "%s: urb %p\n", __func__
, urb
);
/* this driver does not implement isochronous transfers */
1231 if (type
== PIPE_ISOCHRONOUS
) {
1232 pr_err("Isochronous transfers not supported\n");
1236 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__
,
1237 usb_pipedevice(pipe
), epnum
,
1238 is_out
? "out" : "in",
1239 usb_pipecontrol(pipe
) ? "ctrl" :
1240 usb_pipeint(pipe
) ? "int" :
1241 usb_pipebulk(pipe
) ? "bulk" :
1243 urb
->transfer_buffer_length
,
1244 (urb
->transfer_flags
& URB_ZERO_PACKET
) ? "ZERO_PACKET " : "",
1245 !(urb
->transfer_flags
& URB_SHORT_NOT_OK
) ?
1248 /* avoid all allocations within spinlocks: request or endpoint */
1250 ep
= kzalloc(sizeof *ep
, mem_flags
);
1254 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1256 /* don't submit to a dead or disabled port */
1257 if (!((isp1362_hcd
->rhport
[0] | isp1362_hcd
->rhport
[1]) &
1258 USB_PORT_STAT_ENABLE
) ||
1259 !HC_IS_RUNNING(hcd
->state
)) {
1262 goto fail_not_linked
;
1265 retval
= usb_hcd_link_urb_to_ep(hcd
, urb
);
1268 goto fail_not_linked
;
/* first URB on this endpoint: initialize the freshly allocated isp1362_ep */
1274 INIT_LIST_HEAD(&ep
->schedule
);
1275 INIT_LIST_HEAD(&ep
->active
);
1276 INIT_LIST_HEAD(&ep
->remove_list
);
1277 ep
->udev
= usb_get_dev(udev
);
1280 ep
->maxpacket
= usb_maxpacket(udev
, urb
->pipe
, is_out
);
/* PTD slot/offset not assigned yet */
1281 ep
->ptd_offset
= -EINVAL
;
1282 ep
->ptd_index
= -EINVAL
;
1283 usb_settoggle(udev
, epnum
, is_out
, 0);
1285 if (type
== PIPE_CONTROL
)
1286 ep
->nextpid
= USB_PID_SETUP
;
1288 ep
->nextpid
= USB_PID_OUT
;
1290 ep
->nextpid
= USB_PID_IN
;
1293 case PIPE_ISOCHRONOUS
:
1294 case PIPE_INTERRUPT
:
/* clamp polling interval to the periodic schedule size */
1295 if (urb
->interval
> PERIODIC_SIZE
)
1296 urb
->interval
= PERIODIC_SIZE
;
1297 ep
->interval
= urb
->interval
;
1298 ep
->branch
= PERIODIC_SIZE
;
1299 ep
->load
= usb_calc_bus_time(udev
->speed
, !is_out
,
1300 (type
== PIPE_ISOCHRONOUS
),
1301 usb_maxpacket(udev
, pipe
, is_out
)) / 1000;
1306 ep
->num_req
= isp1362_hcd
->req_serial
++;
1308 /* maybe put endpoint into schedule */
1312 if (list_empty(&ep
->schedule
)) {
1313 DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1314 __func__
, ep
, ep
->num_req
);
1315 list_add_tail(&ep
->schedule
, &isp1362_hcd
->async
);
1318 case PIPE_ISOCHRONOUS
:
1319 case PIPE_INTERRUPT
:
1320 urb
->interval
= ep
->interval
;
1322 /* urb submitted for already existing EP */
1323 if (ep
->branch
< PERIODIC_SIZE
)
/* reserve a periodic schedule branch with enough free bandwidth */
1326 retval
= balance(isp1362_hcd
, ep
->interval
, ep
->load
);
1328 pr_err("%s: balance returned %d\n", __func__
, retval
);
1331 ep
->branch
= retval
;
1333 isp1362_hcd
->fmindex
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1334 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1335 __func__
, isp1362_hcd
->fmindex
, ep
->branch
,
1336 ((isp1362_hcd
->fmindex
+ PERIODIC_SIZE
- 1) &
1337 ~(PERIODIC_SIZE
- 1)) + ep
->branch
,
1338 (isp1362_hcd
->fmindex
& (PERIODIC_SIZE
- 1)) + ep
->branch
);
1340 if (list_empty(&ep
->schedule
)) {
1341 if (type
== PIPE_ISOCHRONOUS
) {
/* compute first frame: at least 8 frames out, aligned to the interval */
1342 u16 frame
= isp1362_hcd
->fmindex
;
1344 frame
+= max_t(u16
, 8, ep
->interval
);
1345 frame
&= ~(ep
->interval
- 1);
1346 frame
|= ep
->branch
;
1347 if (frame_before(frame
, isp1362_hcd
->fmindex
))
1348 frame
+= ep
->interval
;
1349 urb
->start_frame
= frame
;
1351 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__
, ep
);
1352 list_add_tail(&ep
->schedule
, &isp1362_hcd
->isoc
);
1354 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__
, ep
);
1355 list_add_tail(&ep
->schedule
, &isp1362_hcd
->periodic
);
1358 DBG(1, "%s: ep %p already scheduled\n", __func__
, ep
);
1360 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__
,
1361 ep
->load
/ ep
->interval
, isp1362_hcd
->load
[ep
->branch
],
1362 isp1362_hcd
->load
[ep
->branch
] + ep
->load
);
/* account the reserved bandwidth on the chosen branch */
1363 isp1362_hcd
->load
[ep
->branch
] += ep
->load
;
1367 ALIGNSTAT(isp1362_hcd
, urb
->transfer_buffer
);
/* kick the appropriate transfer engine for this pipe type */
1372 start_atl_transfers(isp1362_hcd
);
1374 case PIPE_INTERRUPT
:
1375 start_intl_transfers(isp1362_hcd
);
1377 case PIPE_ISOCHRONOUS
:
1378 start_iso_transfers(isp1362_hcd
);
/* error unwind: undo the URB link, drop the lock, report failure */
1385 usb_hcd_unlink_urb_from_ep(hcd
, urb
);
1389 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1391 DBG(0, "%s: urb %p failed with %d\n", __func__
, urb
, retval
);
/*
 * isp1362_urb_dequeue() - HCD .urb_dequeue hook: cancel a queued URB.
 * Validates the unlink with usb_hcd_check_unlink_urb(); if the URB is at
 * the head of the endpoint's queue and the endpoint has an active PTD,
 * queues the PTD for removal (remove_ptd) so the IRQ handler can finish
 * it; otherwise completes the URB directly via finish_request().
 * Runs under isp1362_hcd->lock with IRQs disabled.
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, and elided lines (retval declaration, ep lookup, braces).
 * Comments only added.
 */
1395 static int isp1362_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
)
1397 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1398 struct usb_host_endpoint
*hep
;
1399 unsigned long flags
;
1400 struct isp1362_ep
*ep
;
1403 DBG(3, "%s: urb %p\n", __func__
, urb
);
1405 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1406 retval
= usb_hcd_check_unlink_urb(hcd
, urb
, status
);
1413 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1419 /* In front of queue? */
1420 if (ep
->hep
->urb_list
.next
== &urb
->urb_list
) {
1421 if (!list_empty(&ep
->active
)) {
1422 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__
,
1423 urb
, ep
, ep
->num_req
, ep
->ptd_index
, ep
->ptd_offset
);
1424 /* disable processing and queue PTD for removal */
1425 remove_ptd(isp1362_hcd
, ep
);
/* no active PTD: the URB can be completed immediately */
1430 DBG(1, "%s: Finishing ep %p req %d\n", __func__
, ep
,
1432 finish_request(isp1362_hcd
, ep
, urb
, status
);
1434 DBG(1, "%s: urb %p active; wait4irq\n", __func__
, urb
);
1436 pr_warning("%s: No EP in URB %p\n", __func__
, urb
);
1440 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1442 DBG(3, "%s: exit\n", __func__
);
/*
 * isp1362_endpoint_disable() - HCD .endpoint_disable hook.
 * If the endpoint still has queued URBs and an active PTD, queues the PTD
 * for removal and then busy-waits (outside the lock) until the IRQ handler
 * empties ep->active; finally drops the device reference taken at enqueue
 * time with usb_put_dev().
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, elided lines (braces, the wait-loop body, kfree of ep).
 * Comments only added.
 */
1447 static void isp1362_endpoint_disable(struct usb_hcd
*hcd
, struct usb_host_endpoint
*hep
)
1449 struct isp1362_ep
*ep
= hep
->hcpriv
;
1450 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1451 unsigned long flags
;
1453 DBG(1, "%s: ep %p\n", __func__
, ep
);
1456 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1457 if (!list_empty(&hep
->urb_list
)) {
/* active PTD not yet queued for removal: request its removal now */
1458 if (!list_empty(&ep
->active
) && list_empty(&ep
->remove_list
)) {
1459 DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__
,
1460 ep
, ep
->num_req
, ep
->ptd_index
, ep
->ptd_offset
);
1461 remove_ptd(isp1362_hcd
, ep
);
1462 pr_info("%s: Waiting for Interrupt to clean up\n", __func__
);
1465 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1466 /* Wait for interrupt to clear out active list */
1467 while (!list_empty(&ep
->active
))
1470 DBG(1, "%s: Freeing EP %p\n", __func__
, ep
);
/* release the usb_get_dev() reference taken in urb_enqueue */
1472 usb_put_dev(ep
->udev
);
/*
 * isp1362_get_frame() - HCD .get_frame_number hook.
 * Reads the current frame number from the HCFMNUM register under the
 * driver spinlock.
 *
 * NOTE(review): corrupted extraction — the declaration of 'fmnum' and the
 * function's return statement were elided. Comments only added.
 */
1477 static int isp1362_get_frame(struct usb_hcd
*hcd
)
1479 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1481 unsigned long flags
;
1483 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1484 fmnum
= isp1362_read_reg32(isp1362_hcd
, HCFMNUM
);
1485 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1490 /*-------------------------------------------------------------------------*/
1492 /* Adapted from ohci-hub.c */
/*
 * isp1362_hub_status_data() - HCD .hub_status_data hook (from ohci-hub.c).
 * Builds the root-hub status-change bitmap in buf[0]: bit 0 from the hub
 * status change flags (RH_HS_LPSC/RH_HS_OCIC), bit i+1 for each port with
 * a change bit set in the cached isp1362_hcd->rhport[i]. Returns early if
 * the HC is not running or a root-hub timer poll is already pending.
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, elided lines (braces, return statements). Comments only
 * added.
 */
1493 static int isp1362_hub_status_data(struct usb_hcd
*hcd
, char *buf
)
1495 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1496 int ports
, i
, changed
= 0;
1497 unsigned long flags
;
1499 if (!HC_IS_RUNNING(hcd
->state
))
1502 /* Report no status change now, if we are scheduled to be
1504 if (timer_pending(&hcd
->rh_timer
))
/* number of downstream ports from the cached HCRHDESCA value */
1507 ports
= isp1362_hcd
->rhdesca
& RH_A_NDP
;
1510 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1512 if (isp1362_hcd
->rhstatus
& (RH_HS_LPSC
| RH_HS_OCIC
))
1513 buf
[0] = changed
= 1;
1517 for (i
= 0; i
< ports
; i
++) {
1518 u32 status
= isp1362_hcd
->rhport
[i
];
/* any port change bit sets the per-port change flag (bit i+1) */
1520 if (status
& (RH_PS_CSC
| RH_PS_PESC
| RH_PS_PSSC
|
1521 RH_PS_OCIC
| RH_PS_PRSC
)) {
1523 buf
[0] |= 1 << (i
+ 1);
1527 if (!(status
& RH_PS_CCS
))
1530 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
/*
 * isp1362_hub_descriptor() - fill a usb_hub_descriptor for the root hub.
 * Decodes the cached HCRHDESCA value: low bits give the port count,
 * bits 8.. give the hub characteristics, bits 24..31 give POTPGT
 * (power-on to power-good time).
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, elided lines (brace, the characteristics mask continuation
 * around original line 1547). Comments only added.
 */
1534 static void isp1362_hub_descriptor(struct isp1362_hcd
*isp1362_hcd
,
1535 struct usb_hub_descriptor
*desc
)
1537 u32 reg
= isp1362_hcd
->rhdesca
;
1539 DBG(3, "%s: enter\n", __func__
);
1541 desc
->bDescriptorType
= USB_DT_HUB
;
1542 desc
->bDescLength
= 9;
1543 desc
->bHubContrCurrent
= 0;
/* low bits of HCRHDESCA hold the number of downstream ports */
1544 desc
->bNbrPorts
= reg
& 0x3;
1545 /* Power switching, device type, overcurrent. */
1546 desc
->wHubCharacteristics
= cpu_to_le16((reg
>> 8) &
1550 DBG(0, "%s: hubcharacteristics = %02x\n", __func__
,
1551 desc
->wHubCharacteristics
);
1552 desc
->bPwrOn2PwrGood
= (reg
>> 24) & 0xff;
1553 /* ports removable, and legacy PortPwrCtrlMask */
1554 desc
->u
.hs
.DeviceRemovable
[0] = desc
->bNbrPorts
== 1 ? 1 << 1 : 3 << 1;
1555 desc
->u
.hs
.DeviceRemovable
[1] = ~0;
1557 DBG(3, "%s: exit\n", __func__
);
1560 /* Adapted from ohci-hub.c */
/*
 * isp1362_hub_control() - HCD .hub_control hook (adapted from ohci-hub.c).
 * Dispatches the standard hub class requests: hub feature set/clear, hub
 * and port status reads, port feature set/clear (enable, suspend, power,
 * reset and their change bits). Register accesses are bracketed by the
 * driver spinlock; port state is re-read into the rhport[] cache after
 * each write. The error path replies with a protocol stall.
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, and many elided lines (switch scaffolding, 'break's, 'tmp'
 * assignments per feature, retval handling). Several case labels appear
 * without their closing break because of the elision — do not infer
 * fallthrough from this text. Comments only added.
 */
1561 static int isp1362_hub_control(struct usb_hcd
*hcd
, u16 typeReq
, u16 wValue
,
1562 u16 wIndex
, char *buf
, u16 wLength
)
1564 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1566 unsigned long flags
;
1568 int ports
= isp1362_hcd
->rhdesca
& RH_A_NDP
;
1572 case ClearHubFeature
:
1573 DBG(0, "ClearHubFeature: ");
1575 case C_HUB_OVER_CURRENT
:
1576 DBG(0, "C_HUB_OVER_CURRENT\n");
/* ack the over-current change by writing OCIC back to HCRHSTATUS */
1577 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1578 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_OCIC
);
1579 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1580 case C_HUB_LOCAL_POWER
:
1581 DBG(0, "C_HUB_LOCAL_POWER\n");
1588 DBG(0, "SetHubFeature: ");
1590 case C_HUB_OVER_CURRENT
:
1591 case C_HUB_LOCAL_POWER
:
1592 DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1598 case GetHubDescriptor
:
1599 DBG(0, "GetHubDescriptor\n");
1600 isp1362_hub_descriptor(isp1362_hcd
, (struct usb_hub_descriptor
*)buf
);
1603 DBG(0, "GetHubStatus\n");
/* root hub itself never reports status bits */
1604 put_unaligned(cpu_to_le32(0), (__le32
*) buf
);
1608 DBG(0, "GetPortStatus\n");
/* wIndex is 1-based on the wire; validate then convert to 0-based */
1610 if (!wIndex
|| wIndex
> ports
)
1612 tmp
= isp1362_hcd
->rhport
[--wIndex
];
1613 put_unaligned(cpu_to_le32(tmp
), (__le32
*) buf
);
1615 case ClearPortFeature
:
1616 DBG(0, "ClearPortFeature: ");
1617 if (!wIndex
|| wIndex
> ports
)
1622 case USB_PORT_FEAT_ENABLE
:
1623 DBG(0, "USB_PORT_FEAT_ENABLE\n");
1626 case USB_PORT_FEAT_C_ENABLE
:
1627 DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1630 case USB_PORT_FEAT_SUSPEND
:
1631 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1634 case USB_PORT_FEAT_C_SUSPEND
:
1635 DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1638 case USB_PORT_FEAT_POWER
:
1639 DBG(0, "USB_PORT_FEAT_POWER\n");
1643 case USB_PORT_FEAT_C_CONNECTION
:
1644 DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1647 case USB_PORT_FEAT_C_OVER_CURRENT
:
1648 DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1651 case USB_PORT_FEAT_C_RESET
:
1652 DBG(0, "USB_PORT_FEAT_C_RESET\n");
/* write the feature-specific bit and refresh the port status cache */
1659 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1660 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, tmp
);
1661 isp1362_hcd
->rhport
[wIndex
] =
1662 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1663 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1665 case SetPortFeature
:
1666 DBG(0, "SetPortFeature: ");
1667 if (!wIndex
|| wIndex
> ports
)
1671 case USB_PORT_FEAT_SUSPEND
:
1672 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1673 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1674 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, RH_PS_PSS
);
1675 isp1362_hcd
->rhport
[wIndex
] =
1676 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1677 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1679 case USB_PORT_FEAT_POWER
:
1680 DBG(0, "USB_PORT_FEAT_POWER\n");
1681 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1682 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, RH_PS_PPS
);
1683 isp1362_hcd
->rhport
[wIndex
] =
1684 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1685 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1687 case USB_PORT_FEAT_RESET
:
1688 DBG(0, "USB_PORT_FEAT_RESET\n");
1689 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
/* bounded wait for any in-progress reset to finish before starting ours */
1691 t1
= jiffies
+ msecs_to_jiffies(USB_RESET_WIDTH
);
1692 while (time_before(jiffies
, t1
)) {
1693 /* spin until any current reset finishes */
1695 tmp
= isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
);
1696 if (!(tmp
& RH_PS_PRS
))
1700 if (!(tmp
& RH_PS_CCS
))
1702 /* Reset lasts 10ms (claims datasheet) */
1703 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ wIndex
, (RH_PS_PRS
));
1705 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1707 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1710 isp1362_hcd
->rhport
[wIndex
] = isp1362_read_reg32(isp1362_hcd
,
1711 HCRHPORT1
+ wIndex
);
1712 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1721 /* "protocol stall" on error */
1722 DBG(0, "PROTOCOL STALL\n");
/*
 * isp1362_bus_suspend() - HCD .bus_suspend hook.
 * Transitions the controller HCFS state to SUSPEND: handles the
 * RESUME/RESET/SUSPEND starting states, quiesces the schedules (skip all
 * ATL/INTL PTDs, clear HCBUFSTAT, mask interrupts), drains any completed
 * transfers, writes OHCI_USB_SUSPEND to HCCONTROL and verifies the chip
 * accepted it, then rate-limits the next state change by 5 ms.
 * Runs under isp1362_hcd->lock with IRQs disabled.
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, elided lines (early return, msleep/frame-wait scaffolding,
 * error unwind, final return). Comments only added.
 */
1730 static int isp1362_bus_suspend(struct usb_hcd
*hcd
)
1733 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1734 unsigned long flags
;
/* honor the rate limit set by the previous state change */
1736 if (time_before(jiffies
, isp1362_hcd
->next_statechange
))
1739 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1741 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1742 switch (isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) {
1743 case OHCI_USB_RESUME
:
/* suspend requested mid-resume: force a RESET state first */
1744 DBG(0, "%s: resume/suspend?\n", __func__
);
1745 isp1362_hcd
->hc_control
&= ~OHCI_CTRL_HCFS
;
1746 isp1362_hcd
->hc_control
|= OHCI_USB_RESET
;
1747 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1749 case OHCI_USB_RESET
:
1751 pr_warning("%s: needs reinit!\n", __func__
);
1753 case OHCI_USB_SUSPEND
:
1754 pr_warning("%s: already suspended?\n", __func__
);
1757 DBG(0, "%s: suspend root hub\n", __func__
);
1759 /* First stop any processing */
1760 hcd
->state
= HC_STATE_QUIESCING
;
1761 if (!list_empty(&isp1362_hcd
->atl_queue
.active
) ||
1762 !list_empty(&isp1362_hcd
->intl_queue
.active
) ||
1763 !list_empty(&isp1362_hcd
->istl_queue
[0] .active
) ||
1764 !list_empty(&isp1362_hcd
->istl_queue
[1] .active
)) {
/* skip every pending PTD and mask all buffer activity/interrupts */
1767 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, ~0);
1768 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, ~0);
1769 isp1362_write_reg16(isp1362_hcd
, HCBUFSTAT
, 0);
1770 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
1771 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
, OHCI_INTR_SF
);
1773 DBG(0, "%s: stopping schedules ...\n", __func__
);
1778 if (isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
) & OHCI_INTR_SF
)
/* reap transfers that completed before the schedules were stopped */
1782 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ATL
) {
1783 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCATLDONE
);
1784 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->atl_queue
);
1786 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_INTL
) {
1787 u32 done_map
= isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
);
1788 finish_transfers(isp1362_hcd
, done_map
, &isp1362_hcd
->intl_queue
);
1790 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ISTL0
)
1791 finish_iso_transfers(isp1362_hcd
, &isp1362_hcd
->istl_queue
[0]);
1792 if (isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_ISTL1
)
1793 finish_iso_transfers(isp1362_hcd
, &isp1362_hcd
->istl_queue
[1]);
1795 DBG(0, "%s: HCINTSTAT: %08x\n", __func__
,
1796 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
/* acknowledge every pending OHCI interrupt by writing it back */
1797 isp1362_write_reg32(isp1362_hcd
, HCINTSTAT
,
1798 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
1801 isp1362_hcd
->hc_control
= OHCI_USB_SUSPEND
;
1802 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1803 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1804 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
/* read back and verify the controller actually entered SUSPEND */
1807 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1808 if ((isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) != OHCI_USB_SUSPEND
) {
1809 pr_err("%s: controller won't suspend %08x\n", __func__
,
1810 isp1362_hcd
->hc_control
);
1815 /* no resumes until devices finish suspending */
1816 isp1362_hcd
->next_statechange
= jiffies
+ msecs_to_jiffies(5);
1820 hcd
->state
= HC_STATE_SUSPENDED
;
1821 DBG(0, "%s: HCD suspended: %08x\n", __func__
,
1822 isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
1824 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
/*
 * isp1362_bus_resume() - HCD .bus_resume hook.
 * Drives the controller from SUSPEND back to operational: writes
 * OHCI_USB_RESUME, or on power loss (RESET state) restarts the HC
 * entirely via isp1362_hc_stop()/isp1362_hc_start(). Resumes each
 * non-suspended root-hub port, waits the USB-mandated resume time
 * (20 ms + margin, per USB 2.0 sect. 11.5.1.10), then sets
 * OHCI_USB_OPER and marks the HCD running.
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, elided lines (early returns, the port loop header,
 * 'port' declaration, final return). Comments only added.
 */
1828 static int isp1362_bus_resume(struct usb_hcd
*hcd
)
1830 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
1832 unsigned long flags
;
1833 int status
= -EINPROGRESS
;
1835 if (time_before(jiffies
, isp1362_hcd
->next_statechange
))
1838 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1839 isp1362_hcd
->hc_control
= isp1362_read_reg32(isp1362_hcd
, HCCONTROL
);
1840 pr_info("%s: HCCONTROL: %08x\n", __func__
, isp1362_hcd
->hc_control
);
1841 if (hcd
->state
== HC_STATE_RESUMING
) {
1842 pr_warning("%s: duplicate resume\n", __func__
);
1845 switch (isp1362_hcd
->hc_control
& OHCI_CTRL_HCFS
) {
1846 case OHCI_USB_SUSPEND
:
/* normal path: request the RESUME state */
1847 DBG(0, "%s: resume root hub\n", __func__
);
1848 isp1362_hcd
->hc_control
&= ~OHCI_CTRL_HCFS
;
1849 isp1362_hcd
->hc_control
|= OHCI_USB_RESUME
;
1850 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1852 case OHCI_USB_RESUME
:
1853 /* HCFS changes sometime after INTR_RD */
1854 DBG(0, "%s: remote wakeup\n", __func__
);
1857 DBG(0, "%s: odd resume\n", __func__
);
1859 hcd
->self
.root_hub
->dev
.power
.power_state
= PMSG_ON
;
1861 default: /* RESET, we lost power */
1862 DBG(0, "%s: root hub hardware reset\n", __func__
);
1865 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
/* power was lost: full HC restart instead of a resume sequence */
1866 if (status
== -EBUSY
) {
1867 DBG(0, "%s: Restarting HC\n", __func__
);
1868 isp1362_hc_stop(hcd
);
1869 return isp1362_hc_start(hcd
);
1871 if (status
!= -EINPROGRESS
)
1873 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1874 port
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
) & RH_A_NDP
;
1876 u32 stat
= isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
+ port
);
1878 /* force global, not selective, resume */
1879 if (!(stat
& RH_PS_PSS
)) {
1880 DBG(0, "%s: Not Resuming RH port %d\n", __func__
, port
);
1883 DBG(0, "%s: Resuming RH port %d\n", __func__
, port
);
1884 isp1362_write_reg32(isp1362_hcd
, HCRHPORT1
+ port
, RH_PS_POCI
);
1886 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1888 /* Some controllers (lucent) need extra-long delays */
1889 hcd
->state
= HC_STATE_RESUMING
;
1890 mdelay(20 /* usb 11.5.1.10 */ + 15);
1892 isp1362_hcd
->hc_control
= OHCI_USB_OPER
;
1893 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
1894 isp1362_show_reg(isp1362_hcd
, HCCONTROL
);
1895 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
1896 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
1900 /* keep it alive for ~5x suspend + resume costs */
1901 isp1362_hcd
->next_statechange
= jiffies
+ msecs_to_jiffies(250);
1903 hcd
->self
.root_hub
->dev
.power
.power_state
= PMSG_ON
;
1904 hcd
->state
= HC_STATE_RUNNING
;
1908 #define isp1362_bus_suspend NULL
1909 #define isp1362_bus_resume NULL
1912 /*-------------------------------------------------------------------------*/
/*
 * dump_irq() - debugfs helper: print a decoded HCuPINT-style interrupt
 * mask (hex value plus a symbolic tag per set bit) to the seq_file.
 *
 * NOTE(review): corrupted extraction — the opening/closing braces of the
 * function body were elided; the seq_printf itself is complete.
 */
1914 static void dump_irq(struct seq_file
*s
, char *label
, u16 mask
)
1916 seq_printf(s
, "%-15s %04x%s%s%s%s%s%s\n", label
, mask
,
1917 mask
& HCuPINT_CLKRDY
? " clkrdy" : "",
1918 mask
& HCuPINT_SUSP
? " susp" : "",
1919 mask
& HCuPINT_OPR
? " opr" : "",
1920 mask
& HCuPINT_EOT
? " eot" : "",
1921 mask
& HCuPINT_ATL
? " atl" : "",
1922 mask
& HCuPINT_SOF
? " sof" : "");
/*
 * dump_int() - debugfs helper: print a decoded OHCI interrupt mask
 * (HCINTENB/HCINTSTAT style) with a symbolic tag per set bit.
 *
 * NOTE(review): corrupted extraction — body braces elided; the
 * seq_printf itself is complete.
 */
1925 static void dump_int(struct seq_file
*s
, char *label
, u32 mask
)
1927 seq_printf(s
, "%-15s %08x%s%s%s%s%s%s%s\n", label
, mask
,
1928 mask
& OHCI_INTR_MIE
? " MIE" : "",
1929 mask
& OHCI_INTR_RHSC
? " rhsc" : "",
1930 mask
& OHCI_INTR_FNO
? " fno" : "",
1931 mask
& OHCI_INTR_UE
? " ue" : "",
1932 mask
& OHCI_INTR_RD
? " rd" : "",
1933 mask
& OHCI_INTR_SF
? " sof" : "",
1934 mask
& OHCI_INTR_SO
? " so" : "");
/*
 * dump_ctrl() - debugfs helper: print a decoded HCCONTROL value: the raw
 * hex word, flag tags (rwc/rwe/...), then the HCFS functional state via
 * the switch on OHCI_CTRL_HCFS.
 *
 * NOTE(review): corrupted extraction — braces, part of the first
 * seq_printf's argument list, and the case bodies/labels of the HCFS
 * switch were elided. Comments only added.
 */
1937 static void dump_ctrl(struct seq_file
*s
, char *label
, u32 mask
)
1939 seq_printf(s
, "%-15s %08x%s%s%s\n", label
, mask
,
1940 mask
& OHCI_CTRL_RWC
? " rwc" : "",
1941 mask
& OHCI_CTRL_RWE
? " rwe" : "",
/* decode the host controller functional state field */
1944 switch (mask
& OHCI_CTRL_HCFS
) {
1948 case OHCI_USB_RESET
:
1951 case OHCI_USB_RESUME
:
1954 case OHCI_USB_SUSPEND
:
/*
 * dump_regs() - debugfs helper: dump every ISP1362 register (OHCI-mapped
 * 32-bit registers first, then the chip-specific 16-bit registers and the
 * ISTL/INTL/ATL buffer-management registers) with its register number and
 * current value. Caller holds the driver lock (see isp1362_show()).
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, and a few elided lines (body braces, lines around the
 * HCDIRDATA and HCATLDONE reads). Comments only added.
 */
1964 static void dump_regs(struct seq_file
*s
, struct isp1362_hcd
*isp1362_hcd
)
1966 seq_printf(s
, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION
),
1967 isp1362_read_reg32(isp1362_hcd
, HCREVISION
));
1968 seq_printf(s
, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL
),
1969 isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
1970 seq_printf(s
, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT
),
1971 isp1362_read_reg32(isp1362_hcd
, HCCMDSTAT
));
1972 seq_printf(s
, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT
),
1973 isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
1974 seq_printf(s
, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB
),
1975 isp1362_read_reg32(isp1362_hcd
, HCINTENB
));
1976 seq_printf(s
, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL
),
1977 isp1362_read_reg32(isp1362_hcd
, HCFMINTVL
));
1978 seq_printf(s
, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM
),
1979 isp1362_read_reg32(isp1362_hcd
, HCFMREM
));
1980 seq_printf(s
, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM
),
1981 isp1362_read_reg32(isp1362_hcd
, HCFMNUM
));
1982 seq_printf(s
, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH
),
1983 isp1362_read_reg32(isp1362_hcd
, HCLSTHRESH
));
1984 seq_printf(s
, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA
),
1985 isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
));
1986 seq_printf(s
, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB
),
1987 isp1362_read_reg32(isp1362_hcd
, HCRHDESCB
));
1988 seq_printf(s
, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS
),
1989 isp1362_read_reg32(isp1362_hcd
, HCRHSTATUS
));
1990 seq_printf(s
, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1
),
1991 isp1362_read_reg32(isp1362_hcd
, HCRHPORT1
));
1992 seq_printf(s
, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2
),
1993 isp1362_read_reg32(isp1362_hcd
, HCRHPORT2
));
1994 seq_printf(s
, "\n");
/* 16-bit chip-specific registers follow */
1995 seq_printf(s
, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG
),
1996 isp1362_read_reg16(isp1362_hcd
, HCHWCFG
));
1997 seq_printf(s
, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG
),
1998 isp1362_read_reg16(isp1362_hcd
, HCDMACFG
));
1999 seq_printf(s
, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR
),
2000 isp1362_read_reg16(isp1362_hcd
, HCXFERCTR
));
2001 seq_printf(s
, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT
),
2002 isp1362_read_reg16(isp1362_hcd
, HCuPINT
));
2003 seq_printf(s
, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB
),
2004 isp1362_read_reg16(isp1362_hcd
, HCuPINTENB
));
2005 seq_printf(s
, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID
),
2006 isp1362_read_reg16(isp1362_hcd
, HCCHIPID
));
2007 seq_printf(s
, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH
),
2008 isp1362_read_reg16(isp1362_hcd
, HCSCRATCH
));
2009 seq_printf(s
, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT
),
2010 isp1362_read_reg16(isp1362_hcd
, HCBUFSTAT
));
2011 seq_printf(s
, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR
),
2012 isp1362_read_reg32(isp1362_hcd
, HCDIRADDR
));
2014 seq_printf(s
, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA
),
2015 isp1362_read_reg16(isp1362_hcd
, HCDIRDATA
));
2017 seq_printf(s
, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ
),
2018 isp1362_read_reg16(isp1362_hcd
, HCISTLBUFSZ
));
2019 seq_printf(s
, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE
),
2020 isp1362_read_reg16(isp1362_hcd
, HCISTLRATE
));
2021 seq_printf(s
, "\n");
2022 seq_printf(s
, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ
),
2023 isp1362_read_reg16(isp1362_hcd
, HCINTLBUFSZ
));
2024 seq_printf(s
, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ
),
2025 isp1362_read_reg16(isp1362_hcd
, HCINTLBLKSZ
));
2026 seq_printf(s
, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE
),
2027 isp1362_read_reg32(isp1362_hcd
, HCINTLDONE
));
2028 seq_printf(s
, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP
),
2029 isp1362_read_reg32(isp1362_hcd
, HCINTLSKIP
));
2030 seq_printf(s
, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST
),
2031 isp1362_read_reg32(isp1362_hcd
, HCINTLLAST
));
2032 seq_printf(s
, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR
),
2033 isp1362_read_reg16(isp1362_hcd
, HCINTLCURR
));
2034 seq_printf(s
, "\n");
2035 seq_printf(s
, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ
),
2036 isp1362_read_reg16(isp1362_hcd
, HCATLBUFSZ
));
2037 seq_printf(s
, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ
),
2038 isp1362_read_reg16(isp1362_hcd
, HCATLBLKSZ
));
2040 seq_printf(s
, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE
),
2041 isp1362_read_reg32(isp1362_hcd
, HCATLDONE
));
2043 seq_printf(s
, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP
),
2044 isp1362_read_reg32(isp1362_hcd
, HCATLSKIP
));
2045 seq_printf(s
, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST
),
2046 isp1362_read_reg32(isp1362_hcd
, HCATLLAST
));
2047 seq_printf(s
, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR
),
2048 isp1362_read_reg16(isp1362_hcd
, HCATLCURR
));
2049 seq_printf(s
, "\n");
2050 seq_printf(s
, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC
),
2051 isp1362_read_reg16(isp1362_hcd
, HCATLDTC
));
2052 seq_printf(s
, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO
),
2053 isp1362_read_reg16(isp1362_hcd
, HCATLDTCTO
));
/*
 * isp1362_show() - seq_file show callback for the "isp1362" debugfs file.
 * Prints driver identification, buffer-alignment statistics, FIFO
 * high-water marks, decoded interrupt/control registers, per-IRQ
 * counters, the full register dump, and then walks the async, periodic
 * and isoc endpoint schedules describing each endpoint and queued URB.
 * Register/schedule access is under spin_lock_irq(&isp1362_hcd->lock).
 *
 * NOTE(review): corrupted extraction — embedded line numbers, split
 * statements, elided lines (declarations of i/urb, the nextpid switch
 * body around original lines 2098-2115, braces, final return).
 * Comments only added.
 */
2056 static int isp1362_show(struct seq_file
*s
, void *unused
)
2058 struct isp1362_hcd
*isp1362_hcd
= s
->private;
2059 struct isp1362_ep
*ep
;
2062 seq_printf(s
, "%s\n%s version %s\n",
2063 isp1362_hcd_to_hcd(isp1362_hcd
)->product_desc
, hcd_name
, DRIVER_VERSION
);
2065 /* collect statistics to help estimate potential win for
2066 * DMA engines that care about alignment (PXA)
2068 seq_printf(s
, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2069 isp1362_hcd
->stat16
, isp1362_hcd
->stat8
, isp1362_hcd
->stat4
,
2070 isp1362_hcd
->stat2
, isp1362_hcd
->stat1
);
2071 seq_printf(s
, "max # ptds in ATL fifo: %d\n", isp1362_hcd
->atl_queue
.stat_maxptds
);
2072 seq_printf(s
, "max # ptds in INTL fifo: %d\n", isp1362_hcd
->intl_queue
.stat_maxptds
);
2073 seq_printf(s
, "max # ptds in ISTL fifo: %d\n",
2074 max(isp1362_hcd
->istl_queue
[0] .stat_maxptds
,
2075 isp1362_hcd
->istl_queue
[1] .stat_maxptds
));
2077 /* FIXME: don't show the following in suspended state */
2078 spin_lock_irq(&isp1362_hcd
->lock
);
/* decoded views of the interrupt enable/status and control registers */
2080 dump_irq(s
, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd
, HCuPINTENB
));
2081 dump_irq(s
, "hc_irq_status", isp1362_read_reg16(isp1362_hcd
, HCuPINT
));
2082 dump_int(s
, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd
, HCINTENB
));
2083 dump_int(s
, "ohci_int_status", isp1362_read_reg32(isp1362_hcd
, HCINTSTAT
));
2084 dump_ctrl(s
, "ohci_control", isp1362_read_reg32(isp1362_hcd
, HCCONTROL
));
/* per-source interrupt counters, only the non-zero ones */
2086 for (i
= 0; i
< NUM_ISP1362_IRQS
; i
++)
2087 if (isp1362_hcd
->irq_stat
[i
])
2088 seq_printf(s
, "%-15s: %d\n",
2089 ISP1362_INT_NAME(i
), isp1362_hcd
->irq_stat
[i
]);
2091 dump_regs(s
, isp1362_hcd
);
/* async (control/bulk) schedule: one line per endpoint plus its URBs */
2092 list_for_each_entry(ep
, &isp1362_hcd
->async
, schedule
) {
2095 seq_printf(s
, "%p, ep%d%s, maxpacket %d:\n", ep
, ep
->epnum
,
2098 switch (ep
->nextpid
) {
2115 s
;}), ep
->maxpacket
) ;
2116 list_for_each_entry(urb
, &ep
->hep
->urb_list
, urb_list
) {
2117 seq_printf(s
, " urb%p, %d/%d\n", urb
,
2119 urb
->transfer_buffer_length
);
2122 if (!list_empty(&isp1362_hcd
->async
))
2123 seq_printf(s
, "\n");
2124 dump_ptd_queue(&isp1362_hcd
->atl_queue
);
2126 seq_printf(s
, "periodic size= %d\n", PERIODIC_SIZE
);
/* periodic (interrupt) schedule */
2128 list_for_each_entry(ep
, &isp1362_hcd
->periodic
, schedule
) {
2129 seq_printf(s
, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep
->branch
,
2130 isp1362_hcd
->load
[ep
->branch
], ep
->ptd_index
, ep
->ptd_offset
);
2132 seq_printf(s
, " %d/%p (%sdev%d ep%d%s max %d)\n",
2134 (ep
->udev
->speed
== USB_SPEED_FULL
) ? "" : "ls ",
2135 ep
->udev
->devnum
, ep
->epnum
,
2136 (ep
->epnum
== 0) ? "" :
2137 ((ep
->nextpid
== USB_PID_IN
) ?
2138 "in" : "out"), ep
->maxpacket
);
2140 dump_ptd_queue(&isp1362_hcd
->intl_queue
);
2142 seq_printf(s
, "ISO:\n");
/* isochronous schedule */
2144 list_for_each_entry(ep
, &isp1362_hcd
->isoc
, schedule
) {
2145 seq_printf(s
, " %d/%p (%sdev%d ep%d%s max %d)\n",
2147 (ep
->udev
->speed
== USB_SPEED_FULL
) ? "" : "ls ",
2148 ep
->udev
->devnum
, ep
->epnum
,
2149 (ep
->epnum
== 0) ? "" :
2150 ((ep
->nextpid
== USB_PID_IN
) ?
2151 "in" : "out"), ep
->maxpacket
);
2154 spin_unlock_irq(&isp1362_hcd
->lock
);
2155 seq_printf(s
, "\n");
2160 static int isp1362_open(struct inode
*inode
, struct file
*file
)
2162 return single_open(file
, isp1362_show
, inode
);
2165 static const struct file_operations debug_ops
= {
2166 .open
= isp1362_open
,
2168 .llseek
= seq_lseek
,
2169 .release
= single_release
,
2172 /* expect just one isp1362_hcd per system */
2173 static void create_debug_file(struct isp1362_hcd
*isp1362_hcd
)
2175 isp1362_hcd
->debug_file
= debugfs_create_file("isp1362", S_IRUGO
,
2177 isp1362_hcd
, &debug_ops
);
2180 static void remove_debug_file(struct isp1362_hcd
*isp1362_hcd
)
2182 debugfs_remove(isp1362_hcd
->debug_file
);
2185 /*-------------------------------------------------------------------------*/
2187 static void __isp1362_sw_reset(struct isp1362_hcd
*isp1362_hcd
)
2191 isp1362_write_reg16(isp1362_hcd
, HCSWRES
, HCSWRES_MAGIC
);
2192 isp1362_write_reg32(isp1362_hcd
, HCCMDSTAT
, OHCI_HCR
);
2195 if (!(isp1362_read_reg32(isp1362_hcd
, HCCMDSTAT
) & OHCI_HCR
))
2199 pr_err("Software reset timeout\n");
2202 static void isp1362_sw_reset(struct isp1362_hcd
*isp1362_hcd
)
2204 unsigned long flags
;
2206 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2207 __isp1362_sw_reset(isp1362_hcd
);
2208 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2211 static int isp1362_mem_config(struct usb_hcd
*hcd
)
2213 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2214 unsigned long flags
;
2216 u16 istl_size
= ISP1362_ISTL_BUFSIZE
;
2217 u16 intl_blksize
= ISP1362_INTL_BLKSIZE
+ PTD_HEADER_SIZE
;
2218 u16 intl_size
= ISP1362_INTL_BUFFERS
* intl_blksize
;
2219 u16 atl_blksize
= ISP1362_ATL_BLKSIZE
+ PTD_HEADER_SIZE
;
2220 u16 atl_buffers
= (ISP1362_BUF_SIZE
- (istl_size
+ intl_size
)) / atl_blksize
;
2224 WARN_ON(istl_size
& 3);
2225 WARN_ON(atl_blksize
& 3);
2226 WARN_ON(intl_blksize
& 3);
2227 WARN_ON(atl_blksize
< PTD_HEADER_SIZE
);
2228 WARN_ON(intl_blksize
< PTD_HEADER_SIZE
);
2230 BUG_ON((unsigned)ISP1362_INTL_BUFFERS
> 32);
2231 if (atl_buffers
> 32)
2233 atl_size
= atl_buffers
* atl_blksize
;
2234 total
= atl_size
+ intl_size
+ istl_size
;
2235 dev_info(hcd
->self
.controller
, "ISP1362 Memory usage:\n");
2236 dev_info(hcd
->self
.controller
, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2237 istl_size
/ 2, istl_size
, 0, istl_size
/ 2);
2238 dev_info(hcd
->self
.controller
, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
2239 ISP1362_INTL_BUFFERS
, intl_blksize
- PTD_HEADER_SIZE
,
2240 intl_size
, istl_size
);
2241 dev_info(hcd
->self
.controller
, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
2242 atl_buffers
, atl_blksize
- PTD_HEADER_SIZE
,
2243 atl_size
, istl_size
+ intl_size
);
2244 dev_info(hcd
->self
.controller
, " USED/FREE: %4d %4d\n", total
,
2245 ISP1362_BUF_SIZE
- total
);
2247 if (total
> ISP1362_BUF_SIZE
) {
2248 dev_err(hcd
->self
.controller
, "%s: Memory requested: %d, available %d\n",
2249 __func__
, total
, ISP1362_BUF_SIZE
);
2253 total
= istl_size
+ intl_size
+ atl_size
;
2254 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2256 for (i
= 0; i
< 2; i
++) {
2257 isp1362_hcd
->istl_queue
[i
].buf_start
= i
* istl_size
/ 2,
2258 isp1362_hcd
->istl_queue
[i
].buf_size
= istl_size
/ 2;
2259 isp1362_hcd
->istl_queue
[i
].blk_size
= 4;
2260 INIT_LIST_HEAD(&isp1362_hcd
->istl_queue
[i
].active
);
2261 snprintf(isp1362_hcd
->istl_queue
[i
].name
,
2262 sizeof(isp1362_hcd
->istl_queue
[i
].name
), "ISTL%d", i
);
2263 DBG(3, "%s: %5s buf $%04x %d\n", __func__
,
2264 isp1362_hcd
->istl_queue
[i
].name
,
2265 isp1362_hcd
->istl_queue
[i
].buf_start
,
2266 isp1362_hcd
->istl_queue
[i
].buf_size
);
2268 isp1362_write_reg16(isp1362_hcd
, HCISTLBUFSZ
, istl_size
/ 2);
2270 isp1362_hcd
->intl_queue
.buf_start
= istl_size
;
2271 isp1362_hcd
->intl_queue
.buf_size
= intl_size
;
2272 isp1362_hcd
->intl_queue
.buf_count
= ISP1362_INTL_BUFFERS
;
2273 isp1362_hcd
->intl_queue
.blk_size
= intl_blksize
;
2274 isp1362_hcd
->intl_queue
.buf_avail
= isp1362_hcd
->intl_queue
.buf_count
;
2275 isp1362_hcd
->intl_queue
.skip_map
= ~0;
2276 INIT_LIST_HEAD(&isp1362_hcd
->intl_queue
.active
);
2278 isp1362_write_reg16(isp1362_hcd
, HCINTLBUFSZ
,
2279 isp1362_hcd
->intl_queue
.buf_size
);
2280 isp1362_write_reg16(isp1362_hcd
, HCINTLBLKSZ
,
2281 isp1362_hcd
->intl_queue
.blk_size
- PTD_HEADER_SIZE
);
2282 isp1362_write_reg32(isp1362_hcd
, HCINTLSKIP
, ~0);
2283 isp1362_write_reg32(isp1362_hcd
, HCINTLLAST
,
2284 1 << (ISP1362_INTL_BUFFERS
- 1));
2286 isp1362_hcd
->atl_queue
.buf_start
= istl_size
+ intl_size
;
2287 isp1362_hcd
->atl_queue
.buf_size
= atl_size
;
2288 isp1362_hcd
->atl_queue
.buf_count
= atl_buffers
;
2289 isp1362_hcd
->atl_queue
.blk_size
= atl_blksize
;
2290 isp1362_hcd
->atl_queue
.buf_avail
= isp1362_hcd
->atl_queue
.buf_count
;
2291 isp1362_hcd
->atl_queue
.skip_map
= ~0;
2292 INIT_LIST_HEAD(&isp1362_hcd
->atl_queue
.active
);
2294 isp1362_write_reg16(isp1362_hcd
, HCATLBUFSZ
,
2295 isp1362_hcd
->atl_queue
.buf_size
);
2296 isp1362_write_reg16(isp1362_hcd
, HCATLBLKSZ
,
2297 isp1362_hcd
->atl_queue
.blk_size
- PTD_HEADER_SIZE
);
2298 isp1362_write_reg32(isp1362_hcd
, HCATLSKIP
, ~0);
2299 isp1362_write_reg32(isp1362_hcd
, HCATLLAST
,
2300 1 << (atl_buffers
- 1));
2302 snprintf(isp1362_hcd
->atl_queue
.name
,
2303 sizeof(isp1362_hcd
->atl_queue
.name
), "ATL");
2304 snprintf(isp1362_hcd
->intl_queue
.name
,
2305 sizeof(isp1362_hcd
->intl_queue
.name
), "INTL");
2306 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__
,
2307 isp1362_hcd
->intl_queue
.name
,
2308 isp1362_hcd
->intl_queue
.buf_start
,
2309 ISP1362_INTL_BUFFERS
, isp1362_hcd
->intl_queue
.blk_size
,
2310 isp1362_hcd
->intl_queue
.buf_size
);
2311 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__
,
2312 isp1362_hcd
->atl_queue
.name
,
2313 isp1362_hcd
->atl_queue
.buf_start
,
2314 atl_buffers
, isp1362_hcd
->atl_queue
.blk_size
,
2315 isp1362_hcd
->atl_queue
.buf_size
);
2317 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2322 static int isp1362_hc_reset(struct usb_hcd
*hcd
)
2325 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2327 unsigned long timeout
= 100;
2328 unsigned long flags
;
2331 pr_debug("%s:\n", __func__
);
2333 if (isp1362_hcd
->board
&& isp1362_hcd
->board
->reset
) {
2334 isp1362_hcd
->board
->reset(hcd
->self
.controller
, 1);
2336 if (isp1362_hcd
->board
->clock
)
2337 isp1362_hcd
->board
->clock(hcd
->self
.controller
, 1);
2338 isp1362_hcd
->board
->reset(hcd
->self
.controller
, 0);
2340 isp1362_sw_reset(isp1362_hcd
);
2342 /* chip has been reset. First we need to see a clock */
2343 t
= jiffies
+ msecs_to_jiffies(timeout
);
2344 while (!clkrdy
&& time_before_eq(jiffies
, t
)) {
2345 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2346 clkrdy
= isp1362_read_reg16(isp1362_hcd
, HCuPINT
) & HCuPINT_CLKRDY
;
2347 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2352 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2353 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, HCuPINT_CLKRDY
);
2354 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2356 pr_err("Clock not ready after %lums\n", timeout
);
2362 static void isp1362_hc_stop(struct usb_hcd
*hcd
)
2364 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2365 unsigned long flags
;
2368 pr_debug("%s:\n", __func__
);
2370 del_timer_sync(&hcd
->rh_timer
);
2372 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2374 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
2376 /* Switch off power for all ports */
2377 tmp
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
);
2378 tmp
&= ~(RH_A_NPS
| RH_A_PSM
);
2379 isp1362_write_reg32(isp1362_hcd
, HCRHDESCA
, tmp
);
2380 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_LPS
);
2382 /* Reset the chip */
2383 if (isp1362_hcd
->board
&& isp1362_hcd
->board
->reset
)
2384 isp1362_hcd
->board
->reset(hcd
->self
.controller
, 1);
2386 __isp1362_sw_reset(isp1362_hcd
);
2388 if (isp1362_hcd
->board
&& isp1362_hcd
->board
->clock
)
2389 isp1362_hcd
->board
->clock(hcd
->self
.controller
, 0);
2391 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
#ifdef CHIP_BUFFER_TEST
/*
 * Optional power-on self test of the chip's buffer memory: write known
 * patterns through isp1362_write_buffer() and verify they read back
 * through isp1362_read_buffer() at various byte offsets and sizes.
 *
 * Returns 0 if all checks pass, -ENODEV on a verified mismatch.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		/* second half of the allocation is the read-back area */
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (0..7 bytes) at byte offsets 0..3 */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					       __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* one full-buffer round trip */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header round trips at 256 word offsets */
		for (offset = 0; offset < 256; offset++) {
			/* NOTE(review): reconstructed from an incomplete
			 * extraction; test_size appears to stay 0 here so
			 * only the PTD header round-trips — confirm against
			 * the original source. */
			int test_size = 0;

			yield();

			/* clear chip memory and verify both halves read equal */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the read once before declaring failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					       __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
					   __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
#endif
2487 static int isp1362_hc_start(struct usb_hcd
*hcd
)
2490 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2491 struct isp1362_platform_data
*board
= isp1362_hcd
->board
;
2494 unsigned long flags
;
2496 pr_debug("%s:\n", __func__
);
2498 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2499 chipid
= isp1362_read_reg16(isp1362_hcd
, HCCHIPID
);
2500 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2502 if ((chipid
& HCCHIPID_MASK
) != HCCHIPID_MAGIC
) {
2503 pr_err("%s: Invalid chip ID %04x\n", __func__
, chipid
);
2507 #ifdef CHIP_BUFFER_TEST
2508 ret
= isp1362_chip_test(isp1362_hcd
);
2512 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2513 /* clear interrupt status and disable all interrupt sources */
2514 isp1362_write_reg16(isp1362_hcd
, HCuPINT
, 0xff);
2515 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, 0);
2518 hwcfg
= HCHWCFG_INT_ENABLE
| HCHWCFG_DBWIDTH(1);
2519 if (board
->sel15Kres
)
2520 hwcfg
|= HCHWCFG_PULLDOWN_DS2
|
2521 ((MAX_ROOT_PORTS
> 1) ? HCHWCFG_PULLDOWN_DS1
: 0);
2522 if (board
->clknotstop
)
2523 hwcfg
|= HCHWCFG_CLKNOTSTOP
;
2524 if (board
->oc_enable
)
2525 hwcfg
|= HCHWCFG_ANALOG_OC
;
2526 if (board
->int_act_high
)
2527 hwcfg
|= HCHWCFG_INT_POL
;
2528 if (board
->int_edge_triggered
)
2529 hwcfg
|= HCHWCFG_INT_TRIGGER
;
2530 if (board
->dreq_act_high
)
2531 hwcfg
|= HCHWCFG_DREQ_POL
;
2532 if (board
->dack_act_high
)
2533 hwcfg
|= HCHWCFG_DACK_POL
;
2534 isp1362_write_reg16(isp1362_hcd
, HCHWCFG
, hwcfg
);
2535 isp1362_show_reg(isp1362_hcd
, HCHWCFG
);
2536 isp1362_write_reg16(isp1362_hcd
, HCDMACFG
, 0);
2537 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2539 ret
= isp1362_mem_config(hcd
);
2543 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2546 isp1362_hcd
->rhdesca
= 0;
2547 if (board
->no_power_switching
)
2548 isp1362_hcd
->rhdesca
|= RH_A_NPS
;
2549 if (board
->power_switching_mode
)
2550 isp1362_hcd
->rhdesca
|= RH_A_PSM
;
2552 isp1362_hcd
->rhdesca
|= (board
->potpg
<< 24) & RH_A_POTPGT
;
2554 isp1362_hcd
->rhdesca
|= (25 << 24) & RH_A_POTPGT
;
2556 isp1362_write_reg32(isp1362_hcd
, HCRHDESCA
, isp1362_hcd
->rhdesca
& ~RH_A_OCPM
);
2557 isp1362_write_reg32(isp1362_hcd
, HCRHDESCA
, isp1362_hcd
->rhdesca
| RH_A_OCPM
);
2558 isp1362_hcd
->rhdesca
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCA
);
2560 isp1362_hcd
->rhdescb
= RH_B_PPCM
;
2561 isp1362_write_reg32(isp1362_hcd
, HCRHDESCB
, isp1362_hcd
->rhdescb
);
2562 isp1362_hcd
->rhdescb
= isp1362_read_reg32(isp1362_hcd
, HCRHDESCB
);
2564 isp1362_read_reg32(isp1362_hcd
, HCFMINTVL
);
2565 isp1362_write_reg32(isp1362_hcd
, HCFMINTVL
, (FSMP(FI
) << 16) | FI
);
2566 isp1362_write_reg32(isp1362_hcd
, HCLSTHRESH
, LSTHRESH
);
2568 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2570 isp1362_hcd
->hc_control
= OHCI_USB_OPER
;
2571 hcd
->state
= HC_STATE_RUNNING
;
2573 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2574 /* Set up interrupts */
2575 isp1362_hcd
->intenb
= OHCI_INTR_MIE
| OHCI_INTR_RHSC
| OHCI_INTR_UE
;
2576 isp1362_hcd
->intenb
|= OHCI_INTR_RD
;
2577 isp1362_hcd
->irqenb
= HCuPINT_OPR
| HCuPINT_SUSP
;
2578 isp1362_write_reg32(isp1362_hcd
, HCINTENB
, isp1362_hcd
->intenb
);
2579 isp1362_write_reg16(isp1362_hcd
, HCuPINTENB
, isp1362_hcd
->irqenb
);
2581 /* Go operational */
2582 isp1362_write_reg32(isp1362_hcd
, HCCONTROL
, isp1362_hcd
->hc_control
);
2583 /* enable global power */
2584 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_LPSC
| RH_HS_DRWE
);
2586 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2591 /*-------------------------------------------------------------------------*/
2593 static struct hc_driver isp1362_hc_driver
= {
2594 .description
= hcd_name
,
2595 .product_desc
= "ISP1362 Host Controller",
2596 .hcd_priv_size
= sizeof(struct isp1362_hcd
),
2599 .flags
= HCD_USB11
| HCD_MEMORY
,
2601 .reset
= isp1362_hc_reset
,
2602 .start
= isp1362_hc_start
,
2603 .stop
= isp1362_hc_stop
,
2605 .urb_enqueue
= isp1362_urb_enqueue
,
2606 .urb_dequeue
= isp1362_urb_dequeue
,
2607 .endpoint_disable
= isp1362_endpoint_disable
,
2609 .get_frame_number
= isp1362_get_frame
,
2611 .hub_status_data
= isp1362_hub_status_data
,
2612 .hub_control
= isp1362_hub_control
,
2613 .bus_suspend
= isp1362_bus_suspend
,
2614 .bus_resume
= isp1362_bus_resume
,
2617 /*-------------------------------------------------------------------------*/
/*
 * platform_driver .remove hook: unregister the HCD, drop the debugfs
 * entry and release the usb_hcd reference taken in probe.
 */
static int isp1362_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);

	remove_debug_file(isp1362_hcd);
	DBG(0, "%s: Removing HCD\n", __func__);
	usb_remove_hcd(hcd);
	DBG(0, "%s: put_hcd\n", __func__);
	usb_put_hcd(hcd);
	DBG(0, "%s: Done\n", __func__);

	return 0;
}
2634 static int isp1362_probe(struct platform_device
*pdev
)
2636 struct usb_hcd
*hcd
;
2637 struct isp1362_hcd
*isp1362_hcd
;
2638 struct resource
*addr
, *data
, *irq_res
;
2639 void __iomem
*addr_reg
;
2640 void __iomem
*data_reg
;
2643 unsigned int irq_flags
= 0;
2648 /* basic sanity checks first. board-specific init logic should
2649 * have initialized this the three resources and probably board
2650 * specific platform_data. we don't probe for IRQs, and do only
2651 * minimal sanity checking.
2653 if (pdev
->num_resources
< 3)
2656 if (pdev
->dev
.dma_mask
) {
2657 DBG(1, "won't do DMA");
2661 irq_res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
2665 irq
= irq_res
->start
;
2667 addr
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
2668 addr_reg
= devm_ioremap_resource(&pdev
->dev
, addr
);
2669 if (IS_ERR(addr_reg
))
2670 return PTR_ERR(addr_reg
);
2672 data
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2673 data_reg
= devm_ioremap_resource(&pdev
->dev
, data
);
2674 if (IS_ERR(data_reg
))
2675 return PTR_ERR(data_reg
);
2677 /* allocate and initialize hcd */
2678 hcd
= usb_create_hcd(&isp1362_hc_driver
, &pdev
->dev
, dev_name(&pdev
->dev
));
2682 hcd
->rsrc_start
= data
->start
;
2683 isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2684 isp1362_hcd
->data_reg
= data_reg
;
2685 isp1362_hcd
->addr_reg
= addr_reg
;
2687 isp1362_hcd
->next_statechange
= jiffies
;
2688 spin_lock_init(&isp1362_hcd
->lock
);
2689 INIT_LIST_HEAD(&isp1362_hcd
->async
);
2690 INIT_LIST_HEAD(&isp1362_hcd
->periodic
);
2691 INIT_LIST_HEAD(&isp1362_hcd
->isoc
);
2692 INIT_LIST_HEAD(&isp1362_hcd
->remove_list
);
2693 isp1362_hcd
->board
= dev_get_platdata(&pdev
->dev
);
2694 #if USE_PLATFORM_DELAY
2695 if (!isp1362_hcd
->board
->delay
) {
2696 dev_err(hcd
->self
.controller
, "No platform delay function given\n");
2702 if (irq_res
->flags
& IORESOURCE_IRQ_HIGHEDGE
)
2703 irq_flags
|= IRQF_TRIGGER_RISING
;
2704 if (irq_res
->flags
& IORESOURCE_IRQ_LOWEDGE
)
2705 irq_flags
|= IRQF_TRIGGER_FALLING
;
2706 if (irq_res
->flags
& IORESOURCE_IRQ_HIGHLEVEL
)
2707 irq_flags
|= IRQF_TRIGGER_HIGH
;
2708 if (irq_res
->flags
& IORESOURCE_IRQ_LOWLEVEL
)
2709 irq_flags
|= IRQF_TRIGGER_LOW
;
2711 retval
= usb_add_hcd(hcd
, irq
, irq_flags
| IRQF_SHARED
);
2714 device_wakeup_enable(hcd
->self
.controller
);
2716 dev_info(&pdev
->dev
, "%s, irq %d\n", hcd
->product_desc
, irq
);
2718 create_debug_file(isp1362_hcd
);
2729 static int isp1362_suspend(struct platform_device
*pdev
, pm_message_t state
)
2731 struct usb_hcd
*hcd
= platform_get_drvdata(pdev
);
2732 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2733 unsigned long flags
;
2736 DBG(0, "%s: Suspending device\n", __func__
);
2738 if (state
.event
== PM_EVENT_FREEZE
) {
2739 DBG(0, "%s: Suspending root hub\n", __func__
);
2740 retval
= isp1362_bus_suspend(hcd
);
2742 DBG(0, "%s: Suspending RH ports\n", __func__
);
2743 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2744 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_LPS
);
2745 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2748 pdev
->dev
.power
.power_state
= state
;
2752 static int isp1362_resume(struct platform_device
*pdev
)
2754 struct usb_hcd
*hcd
= platform_get_drvdata(pdev
);
2755 struct isp1362_hcd
*isp1362_hcd
= hcd_to_isp1362_hcd(hcd
);
2756 unsigned long flags
;
2758 DBG(0, "%s: Resuming\n", __func__
);
2760 if (pdev
->dev
.power
.power_state
.event
== PM_EVENT_SUSPEND
) {
2761 DBG(0, "%s: Resume RH ports\n", __func__
);
2762 spin_lock_irqsave(&isp1362_hcd
->lock
, flags
);
2763 isp1362_write_reg32(isp1362_hcd
, HCRHSTATUS
, RH_HS_LPSC
);
2764 spin_unlock_irqrestore(&isp1362_hcd
->lock
, flags
);
2768 pdev
->dev
.power
.power_state
= PMSG_ON
;
2770 return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd
));
/* Stub out the PM hooks so the platform_driver initializer below can
 * reference them unconditionally — presumably the #else branch of a
 * CONFIG_PM guard not visible here; confirm the surrounding #ifdef. */
#define isp1362_suspend NULL
#define isp1362_resume NULL
2777 static struct platform_driver isp1362_driver
= {
2778 .probe
= isp1362_probe
,
2779 .remove
= isp1362_remove
,
2781 .suspend
= isp1362_suspend
,
2782 .resume
= isp1362_resume
,
2788 module_platform_driver(isp1362_driver
);