/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
 */

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
static void uhci_free_pending_tds(struct uhci_hcd *uhci);

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci->stall_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

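/*
 * The two helpers above piggyback on the schedule's terminating TD:
 * setting its IOC bit asks the controller for an interrupt at the end
 * of the next frame, which the scan loop uses to reclaim unlinked QHs,
 * TDs and URBs without having to poll.
 */
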
static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	list_move_tail(&urbp->urb_list, &uhci->complete_list);
}

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;

	td->link = UHCI_PTR_TERM;
	td->buffer = 0;

	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

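/*
 * In rough outline (per the UHCI spec): "status" carries the control and
 * status word (active bit, error counter, and the LS/IOS/SPD/IOC flags),
 * "token" carries the packet header (PID, device address, endpoint, data
 * toggle, and the length encoded by uhci_explen()), and "buffer" is the
 * DMA address of the data. All three are stored little-endian, as the
 * controller expects.
 */
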
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->fl->frame[framenum];
		wmb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}
}

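/*
 * Note the update order above: the new TD's own link pointer is filled
 * in before the frame slot (or the previous TD's link) is rewritten to
 * point at it, so the controller never follows a half-initialized
 * entry. The write barriers keep those stores from being reordered.
 */
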
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1 && list_empty(&td->fl_list))
		return;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	wmb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;
}

/*
 * Inserts a td list into qh.
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	__le32 *plink;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	plink = &qh->element;
	list_for_each_entry(td, &urbp->td_list, list) {
		*plink = cpu_to_le32(td->dma_handle) | breadth;
		plink = &td->link;
	}
	*plink = UHCI_PTR_TERM;
}

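/*
 * UHCI_PTR_BREADTH in these element links tells the controller to run at
 * most one TD from this queue per pass before moving on to the next QH;
 * uhci_fsbr_timeout() below selectively sets UHCI_PTR_DEPTH so a queue
 * can advance several TDs per frame without monopolizing the bus.
 */
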
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	qh->urbp = NULL;

	INIT_LIST_HEAD(&qh->list);
	INIT_LIST_HEAD(&qh->remove_list);

	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
	if (!list_empty(&qh->remove_list))
		dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * Append this urb's qh after the last qh in skelqh->list
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct urb_priv *turbp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/* Point to the next skelqh */
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs? HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     |
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
	if (lqh->urbp) {
		list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
			turbp->qh->link = lqh->link;
	}

	list_add_tail(&urbp->qh->list, &skelqh->list);
}

/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 newlink;

	if (!qh)
		return;

	/*
	 * Only go through the hoops if it's actually linked in
	 */
	if (!list_empty(&qh->list)) {

		/* If our queue is nonempty, make the next URB the head */
		if (!list_empty(&qh->urbp->queue_list)) {
			struct urb_priv *nurbp;

			nurbp = list_entry(qh->urbp->queue_list.next,
					struct urb_priv, queue_list);
			nurbp->queued = 0;
			list_add(&nurbp->qh->list, &qh->list);
			newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		} else
			newlink = qh->link;

		/* Fix up the previous QH's queue to link to either
		 * the new head of this queue or the start of the
		 * next endpoint's queue. */
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
		pqh->link = newlink;
		if (pqh->urbp) {
			struct urb_priv *turbp;

			list_for_each_entry(turbp, &pqh->urbp->queue_list,
					queue_list)
				turbp->qh->link = newlink;
		}
		wmb();

		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}
	list_del_init(&qh->urbp->queue_list);
	qh->urbp = NULL;

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) {
		uhci_free_pending_qhs(uhci);
		uhci->qh_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);
}

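/*
 * Walk an URB's TDs, forcing each one's data toggle to alternate
 * starting from <toggle>. Returns the toggle value that the next
 * transfer on this endpoint should begin with.
 */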
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list) {
		if (toggle)
			td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
		else
			td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);

		toggle ^= 1;
	}

	return toggle;
}

/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct uhci_td *lltd;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	/* Find the first URB in the queue */
	furbp = eurbp;
	if (eurbp->queued)
		list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
			if (!furbp->queued)
				break;

	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Control transfers always start with toggle 0 */
	if (!usb_pipecontrol(urb->pipe))
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe),
				uhci_fixup_toggle(urb,
					uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	wmb();			/* Make sure we flush everything */

	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;
}

static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp, *purbp, *turbp;
	struct uhci_td *pltd;
	unsigned int toggle;

	urbp = urb->hcpriv;

	if (list_empty(&urbp->queue_list))
		return;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 * Only needed for bulk and interrupt: control and isochronous
	 * endpoints don't propagate toggles between messages.
	 */
	if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
		if (!urbp->queued)
			/* We just set the toggle in uhci_unlink_generic */
			toggle = usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe));
		else {
			/* If we're in the middle of the queue, grab the */
			/* toggle from the TD previous to us */
			purbp = list_entry(urbp->queue_list.prev,
					struct urb_priv, queue_list);
			pltd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			toggle = uhci_toggle(td_token(pltd)) ^ 1;
		}

		list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
			if (!turbp->queued)
				break;
			toggle = uhci_fixup_toggle(turbp->urb, toggle);
		}

		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), toggle);
	}

	if (urbp->queued) {
		/* We're somewhere in the middle (or end). The case where
		 * we're at the head is handled in uhci_remove_qh(). */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	/* urbp->queue_list is handled in uhci_remove_qh() */
}

static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->inserttime = jiffies;
	urbp->fsbrtime = jiffies;
	urbp->urb = urb;

	INIT_LIST_HEAD(&urbp->td_list);
	INIT_LIST_HEAD(&urbp->queue_list);
	INIT_LIST_HEAD(&urbp->urb_list);

	list_add_tail(&urbp->urb_list, &uhci->urb_list);

	urb->hcpriv = urbp;

	return urbp;
}

static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	td->urb = urb;

	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);

	td->urb = NULL;
}

static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
				"or uhci->remove_list!\n", urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TDs */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}

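/*
 * FSBR bookkeeping in brief: uhci->fsbr counts the URBs currently using
 * full-speed bandwidth reclamation. The first user loops the terminating
 * QH back to the full-speed control skeleton so the controller keeps
 * rescanning the schedule for the rest of each frame; when the count
 * drops to zero a grace period is armed, and check_fsbr() at the bottom
 * of this file breaks the loop by re-terminating skel_term_qh.
 */
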
/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */
	return 0;
}

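/*
 * For example, a stalled endpoint maps to -EPIPE, which the completion
 * paths below treat as an expected error when deciding whether to emit
 * debugging output.
 */
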
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
			urb->setup_dma);

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
				data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * is in the DEFAULT state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state == USB_STATE_DEFAULT)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}

/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD. It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer. The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten. The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	urbp->short_control_packet = 1;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	urbp->qh->element = cpu_to_le32(td->dma_handle);

	return -EINPROGRESS;
}

static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_stage;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TDs (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		unsigned int ctrlstat;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (pktsze >= len) {
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
			!len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}

/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
	if (ret == -EINPROGRESS)
		uhci_inc_fsbr(uhci, urb);

	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval;
	 * that's the uhci_submit_common() "breadth first" policy. Drivers
	 * can submit urbs of any length, but longer ones might need many
	 * intervals to complete.
	 */
	return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
}

/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct urb_priv *up;
	int ret = 0;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		/* look for pending URBs with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
				last_urb->interval) & (UHCI_NUMFRAMES - 1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}

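/*
 * On success, *start and *end bracket the frames already claimed by
 * pending isochronous URBs on this pipe, so an URB_ISO_ASAP submission
 * can be scheduled to begin right at *end.
 */
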
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10)
					& (UHCI_NUMFRAMES - 1);
		} else
			urb->start_frame = end;
	} else {
		urb->start_frame &= (UHCI_NUMFRAMES - 1);
		/* FIXME: Sanity check */
	}

	return 0;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}

static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *up;

	/* We don't match Isoc transfers since they are special */
	if (usb_pipeisoc(urb->pipe))
		return NULL;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		if (u->dev == urb->dev && u->status == -EINPROGRESS) {
			/* For control, ignore the direction */
			if (usb_pipecontrol(urb->pipe) &&
			    (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
				return u;
			else if (u->pipe == urb->pipe)
				return u;
		}
	}

	return NULL;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep,
		struct urb *urb, unsigned mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!urb->bandwidth) {		/* not yet checked/allocated */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;
	urb->status = ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}

static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QHs */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci->urb_remove_age = uhci->frame_number;
	}

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

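/*
 * Called when an FSBR URB has been queued too long without completing:
 * turn bandwidth reclamation off for it, and switch some of its TD
 * links to depth-first traversal as a milder way of keeping the
 * transfer moving.
 */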
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head;
	struct uhci_td *td;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle.
	 */

	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TDs to
		 * make sure it doesn't hog the bandwidth
		 */
		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
				(DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}

static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
	struct uhci_qh *qh, *tmp;

	list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
		list_del_init(&qh->remove_list);

		uhci_free_qh(uhci, qh);
	}
}

static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);

		uhci_free_td(uhci, td);
	}
}

static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->lock);
}

static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
		struct urb *urb = urbp->urb;

		list_del_init(&urbp->urb_list);
		uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
	}
}

static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{
	/* Splice the urb_remove_list onto the end of the complete_list */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}

/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
		uhci_free_pending_qhs(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
		uhci_remove_pending_urbps(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(uhci, regs);

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped) {
		uhci_free_pending_qhs(uhci);
		uhci_free_pending_tds(uhci);
		uhci_remove_pending_urbps(uhci);
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (list_empty(&uhci->urb_remove_list) &&
	    list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->qh_remove_list))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);
}

static void check_fsbr(struct uhci_hcd *uhci)
{
	struct urb_priv *up;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout &&
				time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout &&
			time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}