/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
static void uhci_free_pending_tds(struct uhci_hcd *uhci);
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	list_move_tail(&urbp->urb_list, &uhci->complete_list);
}
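
/*
 * Allocate a TD from the DMA pool and initialize its link pointer
 * and list heads.  Returns NULL if the pool is exhausted.
 */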
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;

	td->link = UHCI_PTR_TERM;
	td->buffer = 0;

	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}
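
/*
 * Unlink an isochronous TD from the frame list: point either the
 * hardware frame pointer or the previous TD's link past it, keeping
 * uhci->frame and uhci->frame_cpu in sync.
 */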
static inline void uhci_remove_td_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	wmb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;
}
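
/*
 * Remove all of an isochronous URB's TDs from the frame list.
 */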
static void unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_frame_list(uhci, td);
	wmb();
}
/*
 * Inserts a td list into qh.
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	__le32 *plink;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	plink = &qh->element;
	list_for_each_entry(td, &urbp->td_list, list) {
		*plink = cpu_to_le32(td->dma_handle) | breadth;
		plink = &td->link;
	}
	*plink = UHCI_PTR_TERM;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci)
{
	struct uhci_qh *qh;
	dma_addr_t dma_handle;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	qh->urbp = NULL;

	INIT_LIST_HEAD(&qh->list);
	INIT_LIST_HEAD(&qh->remove_list);

	return qh;
}
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
	if (!list_empty(&qh->remove_list))
		dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * Append this urb's qh after the last qh in skelqh->list
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct urb_priv *turbp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/* Point to the next skelqh */
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
	if (lqh->urbp) {
		list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
			turbp->qh->link = lqh->link;
	}

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 newlink;

	if (!qh)
		return;

	/*
	 * Only go through the hoops if it's actually linked in
	 */
	if (!list_empty(&qh->list)) {

		/* If our queue is nonempty, make the next URB the head */
		if (!list_empty(&qh->urbp->queue_list)) {
			struct urb_priv *nurbp;

			nurbp = list_entry(qh->urbp->queue_list.next,
					struct urb_priv, queue_list);
			nurbp->queued = 0;
			list_add(&nurbp->qh->list, &qh->list);
			newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		} else
			newlink = qh->link;

		/* Fix up the previous QH's queue to link to either
		 * the new head of this queue or the start of the
		 * next endpoint's queue. */
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
		pqh->link = newlink;
		if (pqh->urbp) {
			struct urb_priv *turbp;

			list_for_each_entry(turbp, &pqh->urbp->queue_list,
					queue_list)
				turbp->qh->link = newlink;
		}
		wmb();

		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}

	list_del_init(&qh->urbp->queue_list);
	qh->urbp = NULL;

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) {
		uhci_free_pending_qhs(uhci);
		uhci->qh_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);
}
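
/*
 * Renumber the data toggles for all of an URB's TDs, starting from the
 * given toggle value.  Returns the toggle the next transfer on the
 * endpoint should start with.
 */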
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list) {
		if (toggle)
			td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
		else
			td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);

		toggle ^= 1;
	}

	return toggle;
}
/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct uhci_td *lltd;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	/* Find the first URB in the queue */
	furbp = eurbp;
	if (eurbp->queued)
		list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
			if (!furbp->queued)
				break;

	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Control transfers always start with toggle 0 */
	if (!usb_pipecontrol(urb->pipe))
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe),
				uhci_fixup_toggle(urb,
					uhci_toggle(td_token(lltd)) ^ 1));

	/* All qhs in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	wmb();			/* Make sure we flush everything */

	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;
}
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp, *purbp, *turbp;
	struct uhci_td *pltd;
	unsigned int toggle;

	urbp = urb->hcpriv;

	if (list_empty(&urbp->queue_list))
		return;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 * Only needed for bulk and interrupt: control and isochronous
	 * endpoints don't propagate toggles between messages.
	 */
	if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
		if (!urbp->queued)
			/* We just set the toggle in uhci_unlink_generic */
			toggle = usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe));
		else {
			/* If we're in the middle of the queue, grab the */
			/* toggle from the TD previous to us */
			purbp = list_entry(urbp->queue_list.prev,
					struct urb_priv, queue_list);
			pltd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			toggle = uhci_toggle(td_token(pltd)) ^ 1;
		}

		list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
			if (!turbp->queued)
				break;
			toggle = uhci_fixup_toggle(turbp->urb, toggle);
		}

		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), toggle);
	}

	if (urbp->queued) {
		/* We're somewhere in the middle (or end).  The case where
		 * we're at the head is handled in uhci_remove_qh(). */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);
		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	/* urbp->queue_list is handled in uhci_remove_qh() */
}
static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->fsbrtime = jiffies;
	urbp->urb = urb;

	INIT_LIST_HEAD(&urbp->td_list);
	INIT_LIST_HEAD(&urbp->queue_list);
	INIT_LIST_HEAD(&urbp->urb_list);

	list_add_tail(&urbp->urb_list, &uhci->urb_list);

	urb->hcpriv = urbp;

	return urbp;
}
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	list_add_tail(&td->list, &urbp->td_list);
}
static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);
}
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
				"or uhci->remove_list!\n", urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TDs */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
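
/*
 * Full-Speed Bandwidth Reclamation (FSBR): while enabled, the terminating
 * skeleton QH links back to the full-speed control skeleton, so the HC
 * keeps looping through the schedule within a frame instead of idling.
 */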
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}
/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */
	return 0;
}
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);

	qh = uhci_alloc_qh(uhci);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD.  It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer.  The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten.  The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	urbp->short_control_packet = 1;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	urbp->qh->element = cpu_to_le32(td->dma_handle);

	return -EINPROGRESS;
}
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_stage;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TDs (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		unsigned int ctrlstat;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
			status & TD_CTRL_ACTIVE &&
			status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (pktsze >= len) {
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
			!len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(0) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
	if (ret == -EINPROGRESS)
		uhci_inc_fsbr(uhci, urb);

	return ret;
}
static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval;
	 * that's the uhci_submit_common() "breadth first" policy.  Drivers
	 * can submit urbs of any length, but longer ones might need many
	 * intervals to complete.
	 */
	return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
}
/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct urb_priv *up;
	int ret = 0;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		/* look for pending URBs with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
				last_urb->interval) & (UHCI_NUMFRAMES-1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}
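
/*
 * Choose a start frame for a new isochronous URB: right after the last
 * pending URB on the same endpoint if there is one, otherwise (for
 * URB_ISO_ASAP) a little way beyond the current frame number.
 */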
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10)
					& (UHCI_NUMFRAMES - 1);
		} else
			urb->start_frame = end;
	} else {
		urb->start_frame &= (UHCI_NUMFRAMES - 1);
		/* FIXME: Sanity check */
	}

	return 0;
}
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);
	}

	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return -EINPROGRESS;
}
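
/*
 * Collect the results of a completed isochronous URB, one TD per frame.
 */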
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = urb->error_count = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}
	unlink_isochronous_tds(uhci, urb);

	return ret;
}
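
/*
 * Find another pending URB on the same endpoint, if any, so that a new
 * URB can be queued behind it instead of being linked in separately.
 */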
static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *up;

	/* We don't match Isoc transfers since they are special */
	if (usb_pipeisoc(urb->pipe))
		return NULL;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		if (u->dev == urb->dev && u->status == -EINPROGRESS) {
			/* For control, ignore the direction */
			if (usb_pipecontrol(urb->pipe) &&
			    (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
				return u;
			else if (u->pipe == urb->pipe)
				return u;
		}
	}

	return NULL;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;
	urb->status = ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QHs */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
		unlink_isochronous_tds(uhci, urb);
	uhci_unlink_generic(uhci, urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci->urb_remove_age = uhci->frame_number;
	}

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head;
	struct uhci_td *td;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle
	 */

	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TDs to
		 * make sure it doesn't hog the bandwidth
		 */
		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
				(DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
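
/*
 * Reclaim QHs that were unlinked earlier; by now the controller is
 * guaranteed not to be following them any more.
 */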
static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
	struct uhci_qh *qh, *tmp;

	list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
		list_del_init(&qh->remove_list);

		uhci_free_qh(uhci, qh);
	}
}
static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);

		uhci_free_td(uhci, td);
	}
}
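
/*
 * Give an URB back to its driver.  The HCD lock must be dropped around
 * the completion callback, since the driver is allowed to resubmit the
 * URB from within it.
 */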
static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->lock);
}
static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
		struct urb *urb = urbp->urb;

		list_del_init(&urbp->urb_list);
		uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
	}
}
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{
	/* Splice the urb_remove_list onto the end of the complete_list */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}
/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
 rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
		uhci_free_pending_qhs(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
		uhci_remove_pending_urbps(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(uhci, regs);

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped) {
		uhci_free_pending_qhs(uhci);
		uhci_free_pending_tds(uhci);
		uhci_remove_pending_urbps(uhci);
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (list_empty(&uhci->urb_remove_list) &&
	    list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->qh_remove_list))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);
}
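
/*
 * Check for URBs that have had FSBR enabled too long without making
 * progress, and shut FSBR off entirely once it has been idle past its
 * timeout.
 */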
static void check_fsbr(struct uhci_hcd *uhci)
{
	struct urb_priv *up;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}