/* drivers/usb/host/uhci-q.c, from Linux 2.6.36-rc5 */
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        if (uhci->is_stopped)
                mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
        uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
        struct uhci_qh *lqh;

        /* The terminating skeleton QH always points back to the first
         * FSBR QH. Make the last async QH point to the terminating
         * skeleton QH. */
        uhci->fsbr_is_on = 1;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);
        lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
        struct uhci_qh *lqh;

        /* Remove the link from the last async QH to the terminating
         * skeleton QH. */
        uhci->fsbr_is_on = 0;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);
        lqh->link = UHCI_PTR_TERM;
}
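/*
 * Mark an URB as wanting FSBR, unless the submitter opted out
 * by setting URB_NO_FSBR.
 */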
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;

        if (!(urb->transfer_flags & URB_NO_FSBR))
                urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
        if (urbp->fsbr) {
                uhci->fsbr_is_wanted = 1;
                if (!uhci->fsbr_is_on)
                        uhci_fsbr_on(uhci);
                else if (uhci->fsbr_expiring) {
                        uhci->fsbr_expiring = 0;
                        del_timer(&uhci->fsbr_timer);
                }
        }
}
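/*
 * Timer callback: the FSBR grace period expired without any queue
 * asking for FSBR again, so turn it off.
 */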
static void uhci_fsbr_timeout(unsigned long _uhci)
{
        struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
        unsigned long flags;

        spin_lock_irqsave(&uhci->lock, flags);
        if (uhci->fsbr_expiring) {
                uhci->fsbr_expiring = 0;
                uhci_fsbr_off(uhci);
        }
        spin_unlock_irqrestore(&uhci->lock, flags);
}
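/*
 * Allocate a Transfer Descriptor from the DMA pool and initialize it.
 */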
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
        dma_addr_t dma_handle;
        struct uhci_td *td;

        td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
        if (!td)
                return NULL;

        td->dma_handle = dma_handle;
        td->frame = -1;

        INIT_LIST_HEAD(&td->list);
        INIT_LIST_HEAD(&td->fl_list);

        return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
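/*
 * Fill in a TD's three hardware fields (status, token, buffer),
 * converting each word to the little-endian format the controller expects.
 */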
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
                u32 token, u32 buffer)
{
        td->status = cpu_to_le32(status);
        td->token = cpu_to_le32(token);
        td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
        list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
        list_del_init(&td->list);
}
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td, unsigned framenum)
{
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                ftd = uhci->frame_cpu[framenum];
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                td->link = ltd->link;
                wmb();
                ltd->link = LINK_TO_TD(td);
        } else {
                td->link = uhci->frame[framenum];
                wmb();
                uhci->frame[framenum] = LINK_TO_TD(td);
                uhci->frame_cpu[framenum] = td;
        }
}
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1) {
                WARN_ON(!list_empty(&td->fl_list));
                return;
        }

        if (uhci->frame_cpu[td->frame] == td) {
                if (list_empty(&td->fl_list)) {
                        uhci->frame[td->frame] = td->link;
                        uhci->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
                        uhci->frame[td->frame] = LINK_TO_TD(ntd);
                        uhci->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        list_del_init(&td->fl_list);
        td->frame = -1;
}
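/*
 * Remove all the TDs in one frame, pointing the frame at whatever
 * followed the last of them.
 */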
static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
                unsigned int framenum)
{
        struct uhci_td *ftd, *ltd;

        framenum &= (UHCI_NUMFRAMES - 1);

        ftd = uhci->frame_cpu[framenum];
        if (ftd) {
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
                uhci->frame[framenum] = ltd->link;
                uhci->frame_cpu[framenum] = NULL;

                while (!list_empty(&ftd->fl_list))
                        list_del_init(ftd->fl_list.prev);
        }
}
/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
        struct uhci_td *td;

        list_for_each_entry(td, &urbp->td_list, list)
                uhci_remove_td_from_frame_list(uhci, td);
}
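/*
 * Allocate and initialize a QH. With a non-NULL udev this is a normal
 * endpoint QH (non-isochronous endpoints also get a dummy TD); with a
 * NULL udev it is a skeleton QH for the schedule framework.
 */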
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
                struct usb_device *udev, struct usb_host_endpoint *hep)
{
        dma_addr_t dma_handle;
        struct uhci_qh *qh;

        qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
        if (!qh)
                return NULL;

        memset(qh, 0, sizeof(*qh));
        qh->dma_handle = dma_handle;

        qh->element = UHCI_PTR_TERM;
        qh->link = UHCI_PTR_TERM;

        INIT_LIST_HEAD(&qh->queue);
        INIT_LIST_HEAD(&qh->node);

        if (udev) {             /* Normal QH */
                qh->type = usb_endpoint_type(&hep->desc);
                if (qh->type != USB_ENDPOINT_XFER_ISOC) {
                        qh->dummy_td = uhci_alloc_td(uhci);
                        if (!qh->dummy_td) {
                                dma_pool_free(uhci->qh_pool, qh, dma_handle);
                                return NULL;
                        }
                }
                qh->state = QH_STATE_IDLE;
                qh->hep = hep;
                qh->udev = udev;
                hep->hcpriv = qh;

                if (qh->type == USB_ENDPOINT_XFER_INT ||
                                qh->type == USB_ENDPOINT_XFER_ISOC)
                        qh->load = usb_calc_bus_time(udev->speed,
                                        usb_endpoint_dir_in(&hep->desc),
                                        qh->type == USB_ENDPOINT_XFER_ISOC,
                                        le16_to_cpu(hep->desc.wMaxPacketSize))
                                / 1000 + 1;

        } else {                /* Skeleton QH */
                qh->state = QH_STATE_ACTIVE;
                qh->type = -1;
        }
        return qh;
}
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
        if (!list_empty(&qh->queue))
                dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

        list_del(&qh->node);
        if (qh->udev) {
                qh->hep->hcpriv = NULL;
                if (qh->dummy_td)
                        uhci_free_td(uhci, qh->dummy_td);
        }
        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
                struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        int ret = 1;

        /* Isochronous pipes don't use toggles and their TD link pointers
         * get adjusted during uhci_urb_dequeue(). But since their queues
         * cannot truly be stopped, we have to watch out for dequeues
         * occurring after the nominal unlink frame. */
        if (qh->type == USB_ENDPOINT_XFER_ISOC) {
                ret = (uhci->frame_number + uhci->is_stopped !=
                                qh->unlink_frame);
                goto done;
        }

        /* If the URB isn't first on its queue, adjust the link pointer
         * of the last TD in the previous URB. The toggle doesn't need
         * to be saved since this URB can't be executing yet. */
        if (qh->queue.next != &urbp->node) {
                struct urb_priv *purbp;
                struct uhci_td *ptd;

                purbp = list_entry(urbp->node.prev, struct urb_priv, node);
                WARN_ON(list_empty(&purbp->td_list));
                ptd = list_entry(purbp->td_list.prev, struct uhci_td,
                                list);
                td = list_entry(urbp->td_list.prev, struct uhci_td,
                                list);
                ptd->link = td->link;
                goto done;
        }

        /* If the QH element pointer is UHCI_PTR_TERM then the currently
         * executing URB has already been unlinked, so this one isn't it. */
        if (qh_element(qh) == UHCI_PTR_TERM)
                goto done;
        qh->element = UHCI_PTR_TERM;

        /* Control pipes don't have to worry about toggles */
        if (qh->type == USB_ENDPOINT_XFER_CONTROL)
                goto done;

        /* Save the next toggle value */
        WARN_ON(list_empty(&urbp->td_list));
        td = list_entry(urbp->td_list.next, struct uhci_td, list);
        qh->needs_fixup = 1;
        qh->initial_toggle = uhci_toggle(td_token(td));

done:
        return ret;
}
/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
        struct urb_priv *urbp = NULL;
        struct uhci_td *td;
        unsigned int toggle = qh->initial_toggle;
        unsigned int pipe;

        /* Fixups for a short transfer start with the second URB in the
         * queue (the short URB is the first). */
        if (skip_first)
                urbp = list_entry(qh->queue.next, struct urb_priv, node);

        /* When starting with the first URB, if the QH element pointer is
         * still valid then we know the URB's toggles are okay. */
        else if (qh_element(qh) != UHCI_PTR_TERM)
                toggle = 2;

        /* Fix up the toggle for the URBs in the queue. Normally this
         * loop won't run more than once: When an error or short transfer
         * occurs, the queue usually gets emptied. */
        urbp = list_prepare_entry(urbp, &qh->queue, node);
        list_for_each_entry_continue(urbp, &qh->queue, node) {

                /* If the first TD has the right toggle value, we don't
                 * need to change any toggles in this URB */
                td = list_entry(urbp->td_list.next, struct uhci_td, list);
                if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
                        td = list_entry(urbp->td_list.prev, struct uhci_td,
                                        list);
                        toggle = uhci_toggle(td_token(td)) ^ 1;

                /* Otherwise all the toggles in the URB have to be switched */
                } else {
                        list_for_each_entry(td, &urbp->td_list, list) {
                                td->token ^= cpu_to_le32(
                                                TD_TOKEN_TOGGLE);
                                toggle ^= 1;
                        }
                }
        }

        wmb();
        pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
        usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
                        usb_pipeout(pipe), toggle);
        qh->needs_fixup = 0;
}
/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

        /* Isochronous QHs aren't linked by the hardware */
}
/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        qh->link = pqh->link;
        wmb();
        pqh->link = LINK_TO_QH(qh);
}
/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;
        __le32 link_to_new_qh;

        /* Find the predecessor QH for our new one and insert it in the list.
         * The list of QHs is expected to be short, so linear search won't
         * take too long. */
        list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
                if (pqh->skel <= qh->skel)
                        break;
        }
        list_add(&qh->node, &pqh->node);

        /* Link it into the schedule */
        qh->link = pqh->link;
        wmb();
        link_to_new_qh = LINK_TO_QH(qh);
        pqh->link = link_to_new_qh;

        /* If this is now the first FSBR QH, link the terminating skeleton
         * QH to it. */
        if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
                uhci->skel_term_qh->link = link_to_new_qh;
}
/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(list_empty(&qh->queue));

        /* Set the element pointer if it isn't set already.
         * This isn't needed for Isochronous queues, but it doesn't hurt. */
        if (qh_element(qh) == UHCI_PTR_TERM) {
                struct urb_priv *urbp = list_entry(qh->queue.next,
                                struct urb_priv, node);
                struct uhci_td *td = list_entry(urbp->td_list.next,
                                struct uhci_td, list);

                qh->element = LINK_TO_TD(td);
        }

        /* Treat the queue as if it has just advanced */
        qh->wait_expired = 0;
        qh->advance_jiffies = jiffies;

        if (qh->state == QH_STATE_ACTIVE)
                return;
        qh->state = QH_STATE_ACTIVE;

        /* Move the QH from its old list to the correct spot in the appropriate
         * skeleton's list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_del(&qh->node);

        if (qh->skel == SKEL_ISO)
                link_iso(uhci, qh);
        else if (qh->skel < SKEL_ASYNC)
                link_interrupt(uhci, qh);
        else
                link_async(uhci, qh);
}
/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        pqh->link = qh->link;
        mb();
}
/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;
        __le32 link_to_next_qh = qh->link;

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        pqh->link = link_to_next_qh;

        /* If this was the old first FSBR QH, link the terminating skeleton
         * QH to the next (new first FSBR) QH. */
        if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
                uhci->skel_term_qh->link = link_to_next_qh;
        mb();
}
/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        if (qh->state == QH_STATE_UNLINKING)
                return;
        WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
        qh->state = QH_STATE_UNLINKING;

        /* Unlink the QH from the schedule and record when we did it */
        if (qh->skel == SKEL_ISO)
                ;
        else if (qh->skel < SKEL_ASYNC)
                unlink_interrupt(uhci, qh);
        else
                unlink_async(uhci, qh);

        uhci_get_current_frame_number(uhci);
        qh->unlink_frame = uhci->frame_number;

        /* Force an interrupt so we know when the QH is fully unlinked */
        if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
                uhci_set_next_interrupt(uhci);

        /* Move the QH from its old list to the end of the unlinking list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}
/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state == QH_STATE_ACTIVE);

        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move(&qh->node, &uhci->idle_qh_list);
        qh->state = QH_STATE_IDLE;

        /* Now that the QH is idle, its post_td isn't being used */
        if (qh->post_td) {
                uhci_free_td(uhci, qh->post_td);
                qh->post_td = NULL;
        }

        /* If anyone is waiting for a QH to become idle, wake them up */
        if (uhci->num_waiting)
                wake_up_all(&uhci->waitqh);
}
/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
        int highest_load = uhci->load[phase];

        for (phase += period; phase < MAX_PHASE; phase += period)
                highest_load = max_t(int, highest_load, uhci->load[phase]);
        return highest_load;
}
/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int minimax_load;

        /* Find the optimal phase (unless it is already set) and get
         * its load value. */
        if (qh->phase >= 0)
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
        else {
                int phase, load;
                int max_phase = min_t(int, MAX_PHASE, qh->period);

                qh->phase = 0;
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
                for (phase = 1; phase < max_phase; ++phase) {
                        load = uhci_highest_load(uhci, phase, qh->period);
                        if (load < minimax_load) {
                                minimax_load = load;
                                qh->phase = phase;
                        }
                }
        }

        /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
        if (minimax_load + qh->load > 900) {
                dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
                                "period %d, phase %d, %d + %d us\n",
                                qh->period, qh->phase, minimax_load, qh->load);
                return -ENOSPC;
        }
        return 0;
}
/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] += load;
                uhci->total_load += load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 1;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "reserve", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}
/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] -= load;
                uhci->total_load -= load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 0;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "release", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}
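/*
 * Allocate the driver-private data for an URB and link the two together.
 */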
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
                struct urb *urb)
{
        struct urb_priv *urbp;

        urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
        if (!urbp)
                return NULL;

        urbp->urb = urb;
        urb->hcpriv = urbp;

        INIT_LIST_HEAD(&urbp->node);
        INIT_LIST_HEAD(&urbp->td_list);

        return urbp;
}
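/*
 * Free an URB's private data together with any TDs still attached to it.
 */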
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
                struct urb_priv *urbp)
{
        struct uhci_td *td, *tmp;

        if (!list_empty(&urbp->node))
                dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
                                urbp->urb);

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }

        kmem_cache_free(uhci_up_cachep, urbp);
}
/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
        if (!status)
                return 0;
        if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
                return -EPROTO;
        if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
                if (dir_out)
                        return -EPROTO;
                else
                        return -EILSEQ;
        }
        if (status & TD_CTRL_BABBLE)                    /* Babble */
                return -EOVERFLOW;
        if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
                return -ENOSR;
        if (status & TD_CTRL_STALLED)                   /* Stalled */
                return -EPIPE;
        return 0;
}
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;
        struct urb_priv *urbp = urb->hcpriv;
        int skel;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;

        /*
         * Build the TD for the control request setup packet
         */
        td = qh->dummy_td;
        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(td, status, destination | uhci_explen(8),
                        urb->setup_dma);
        plink = &td->link;
        status |= TD_CTRL_ACTIVE;

        /*
         * If direction is "send", change the packet ID from SETUP (0x2D)
         * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
         * set Short Packet Detect (SPD) for all data packets.
         *
         * 0-length transfers always get treated as "send".
         */
        if (usb_pipeout(urb->pipe) || len == 0)
                destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
        else {
                destination ^= (USB_PID_SETUP ^ USB_PID_IN);
                status |= TD_CTRL_SPD;
        }

        /*
         * Build the DATA TDs
         */
        while (len > 0) {
                int pktsze = maxsze;

                if (len <= pktsze) {            /* The last data packet */
                        pktsze = len;
                        status &= ~TD_CTRL_SPD;
                }

                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(td);

                /* Alternate Data0/1 (start with Data1) */
                destination ^= TD_TOKEN_TOGGLE;

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status, destination | uhci_explen(pktsze),
                                data);
                plink = &td->link;

                data += pktsze;
                len -= pktsze;
        }

        /*
         * Build the final TD for control status
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        /* Change direction for the status transaction */
        destination ^= (USB_PID_IN ^ USB_PID_OUT);
        destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */

        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(td, status | TD_CTRL_IOC,
                        destination | uhci_explen(0), 0);
        plink = &td->link;

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        /* Low-speed transfers get a different queue, and won't hog the bus.
         * Also, some devices enumerate better without FSBR; the easiest way
         * to do that is to put URBs on the low-speed queue while the device
         * isn't in the CONFIGURED state. */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->state != USB_STATE_CONFIGURED)
                skel = SKEL_LS_CONTROL;
        else {
                skel = SKEL_FS_CONTROL;
                uhci_add_fsbr(uhci, urb);
        }
        if (qh->state != QH_STATE_ACTIVE)
                qh->skel = skel;
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;
        struct urb_priv *urbp = urb->hcpriv;
        unsigned int toggle;

        if (len < 0)
                return -EINVAL;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
        toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe));

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;
        if (usb_pipein(urb->pipe))
                status |= TD_CTRL_SPD;

        /*
         * Build the DATA TDs
         */
        plink = NULL;
        td = qh->dummy_td;
        do {    /* Allow zero length packets */
                int pktsze = maxsze;

                if (len <= pktsze) {            /* The last packet */
                        pktsze = len;
                        if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                                status &= ~TD_CTRL_SPD;
                }

                if (plink) {
                        td = uhci_alloc_td(uhci);
                        if (!td)
                                goto nomem;
                        *plink = LINK_TO_TD(td);
                }
                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status,
                                destination | uhci_explen(pktsze) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;
                status |= TD_CTRL_ACTIVE;

                data += pktsze;
                len -= maxsze;
                toggle ^= 1;
        } while (len > 0);

        /*
         * URB_ZERO_PACKET means adding a 0-length packet, if direction
         * is OUT and the transfer_length was an exact multiple of maxsze,
         * hence (len = transfer_length - N * maxsze) == 0.
         * However, if transfer_length == 0, the zero packet was already
         * prepared above.
         */
        if ((urb->transfer_flags & URB_ZERO_PACKET) &&
                        usb_pipeout(urb->pipe) && len == 0 &&
                        urb->transfer_buffer_length > 0) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(td);

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status,
                                destination | uhci_explen(0) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;

                toggle ^= 1;
        }

        /* Set the interrupt-on-completion flag on the last packet.
         * A more-or-less typical 4 KB URB (= size of one memory page)
         * will require about 3 ms to transfer; that's a little on the
         * fast side but not enough to justify delaying an interrupt
         * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
         * flag setting. */
        td->status |= cpu_to_le32(TD_CTRL_IOC);

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe), toggle);
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}
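/*
 * Bulk transfers: uhci_submit_common() on the bulk skeleton,
 * with FSBR requested on success.
 */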
static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* Can't have low-speed bulk transfers */
        if (urb->dev->speed == USB_SPEED_LOW)
                return -EINVAL;

        if (qh->state != QH_STATE_ACTIVE)
                qh->skel = SKEL_BULK;
        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0)
                uhci_add_fsbr(uhci, urb);
        return ret;
}
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* USB 1.1 interrupt transfers only involve one packet per interval.
         * Drivers can submit URBs of any length, but longer ones will need
         * multiple intervals to complete.
         */

        if (!qh->bandwidth_reserved) {
                int exponent;

                /* Figure out which power-of-two queue to use */
                for (exponent = 7; exponent >= 0; --exponent) {
                        if ((1 << exponent) <= urb->interval)
                                break;
                }
                if (exponent < 0)
                        return -EINVAL;

                /* If the slot is full, try a lower period */
                do {
                        qh->period = 1 << exponent;
                        qh->skel = SKEL_INDEX(exponent);

                        /* For now, interrupt phase is fixed by the layout
                         * of the QH lists.
                         */
                        qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
                        ret = uhci_check_bandwidth(uhci, qh);
                } while (ret != 0 && --exponent >= 0);
                if (ret)
                        return ret;
        } else if (qh->period > urb->interval)
                return -EINVAL;         /* Can't decrease the period */

        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0) {
                urb->interval = qh->period;
                if (!qh->bandwidth_reserved)
                        uhci_reserve_bandwidth(uhci, qh);
        }
        return ret;
}
/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
                struct uhci_qh *qh, struct urb_priv *urbp)
{
        struct uhci_td *td;
        struct list_head *tmp;
        int ret;

        td = list_entry(urbp->td_list.prev, struct uhci_td, list);
        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

                /* When a control transfer is short, we have to restart
                 * the queue at the status stage transaction, which is
                 * the last TD. */
                WARN_ON(list_empty(&urbp->td_list));
                qh->element = LINK_TO_TD(td);
                tmp = td->list.prev;
                ret = -EINPROGRESS;

        } else {

                /* When a bulk/interrupt transfer is short, we have to
                 * fix up the toggles of the following URBs on the queue
                 * before restarting the queue at the next URB. */
                qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
                uhci_fixup_toggles(qh, 1);

                if (list_empty(&urbp->td_list))
                        td = qh->post_td;
                qh->element = td->link;
                tmp = urbp->td_list.prev;
                ret = 0;
        }

        /* Remove all the TDs we skipped over, from tmp back to the start */
        while (tmp != &urbp->td_list) {
                td = list_entry(tmp, struct uhci_td, list);
                tmp = tmp->prev;

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }
        return ret;
}
/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;
        struct uhci_td *td, *tmp;
        unsigned status;
        int ret = 0;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int len;

                ctrlstat = td_status(td);
                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                len = uhci_actual_length(ctrlstat);
                urb->actual_length += len;

                if (status) {
                        ret = uhci_map_status(status,
                                        uhci_packetout(td_token(td)));
                        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                                /* Some debugging code */
                                dev_dbg(&urb->dev->dev,
                                                "%s: failed with status %x\n",
                                                __func__, status);

                                if (debug > 1 && errbuf) {
                                        /* Print the chain for debugging */
                                        uhci_show_qh(uhci, urbp->qh, errbuf,
                                                        ERRBUF_LEN, 0);
                                        lprintk(errbuf);
                                }
                        }

                /* Did we receive a short packet? */
                } else if (len < uhci_expected_length(td_token(td))) {

                        /* For control transfers, go to the status TD if
                         * this isn't already the last data TD */
                        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
                                if (td->list.next != urbp->td_list.prev)
                                        ret = 1;
                        }

                        /* For bulk and interrupt, this may be an error */
                        else if (urb->transfer_flags & URB_SHORT_NOT_OK)
                                ret = -EREMOTEIO;

                        /* Fixup needed only if this isn't the URB's last TD */
                        else if (&td->list != urbp->td_list.prev)
                                ret = 1;
                }

                uhci_remove_td_from_urbp(td);
                if (qh->post_td)
                        uhci_free_td(uhci, qh->post_td);
                qh->post_td = td;

                if (ret != 0)
                        goto err;
        }
        return ret;

err:
        if (ret < 0) {
                /* Note that the queue has stopped and save
                 * the next toggle value */
                qh->element = UHCI_PTR_TERM;
                qh->is_stopped = 1;
                qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
                qh->initial_toggle = uhci_toggle(td_token(td)) ^
                                (ret == -EREMOTEIO);

        } else          /* Short packet received */
                ret = uhci_fixup_short_transfer(uhci, qh, urbp);
        return ret;
}
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td = NULL;      /* Since urb->number_of_packets > 0 */
        int i, frame;
        unsigned long destination, status;
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        /* Values must not be too big (could overflow below) */
        if (urb->interval >= UHCI_NUMFRAMES ||
                        urb->number_of_packets >= UHCI_NUMFRAMES)
                return -EFBIG;

        /* Check the period and figure out the starting frame number */
        if (!qh->bandwidth_reserved) {
                qh->period = urb->interval;
                if (urb->transfer_flags & URB_ISO_ASAP) {
                        qh->phase = -1;         /* Find the best phase */
                        i = uhci_check_bandwidth(uhci, qh);
                        if (i)
                                return i;

                        /* Allow a little time to allocate the TDs */
                        uhci_get_current_frame_number(uhci);
                        frame = uhci->frame_number + 10;

                        /* Move forward to the first frame having the
                         * correct phase */
                        urb->start_frame = frame + ((qh->phase - frame) &
                                        (qh->period - 1));
                } else {
                        i = urb->start_frame - uhci->last_iso_frame;
                        if (i <= 0 || i >= UHCI_NUMFRAMES)
                                return -EINVAL;
                        qh->phase = urb->start_frame & (qh->period - 1);
                        i = uhci_check_bandwidth(uhci, qh);
                        if (i)
                                return i;
                }

        } else if (qh->period != urb->interval) {
                return -EINVAL;         /* Can't change the period */

        } else {
                /* Find the next unused frame */
                if (list_empty(&qh->queue)) {
                        frame = qh->iso_frame;
                } else {
                        struct urb *lurb;

                        lurb = list_entry(qh->queue.prev,
                                        struct urb_priv, node)->urb;
                        frame = lurb->start_frame +
                                        lurb->number_of_packets *
                                        lurb->interval;
                }
                if (urb->transfer_flags & URB_ISO_ASAP) {
                        /* Skip some frames if necessary to ensure
                         * the start frame is in the future.
                         */
                        uhci_get_current_frame_number(uhci);
                        if (uhci_frame_before_eq(frame, uhci->frame_number)) {
                                frame = uhci->frame_number + 1;
                                frame += ((qh->phase - frame) &
                                                (qh->period - 1));
                        }
                }       /* Otherwise pick up where the last URB leaves off */
                urb->start_frame = frame;
        }

        /* Make sure we won't have to go too far into the future */
        if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
                        urb->start_frame + urb->number_of_packets *
                                urb->interval))
                return -EFBIG;

        status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

        for (i = 0; i < urb->number_of_packets; i++) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        return -ENOMEM;

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status, destination |
                                uhci_explen(urb->iso_frame_desc[i].length),
                                urb->transfer_dma +
                                        urb->iso_frame_desc[i].offset);
        }

        /* Set the interrupt-on-completion flag on the last packet. */
        td->status |= cpu_to_le32(TD_CTRL_IOC);

        /* Add the TDs to the frame list */
        frame = urb->start_frame;
        list_for_each_entry(td, &urbp->td_list, list) {
                uhci_insert_td_in_frame_list(uhci, td, frame);
                frame += qh->period;
        }

        if (list_empty(&qh->queue)) {
                qh->iso_packet_desc = &urb->iso_frame_desc[0];
                qh->iso_frame = urb->start_frame;
        }

        qh->skel = SKEL_ISO;
        if (!qh->bandwidth_reserved)
                uhci_reserve_bandwidth(uhci, qh);
        return 0;
}
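/*
 * Harvest the completed TDs of an Isochronous URB. Returns -EINPROGRESS
 * as long as the controller hasn't yet passed the frame of the URB's
 * next pending TD.
 */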
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td, *tmp;
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int status;
                int actlength;

                if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
                        return -EINPROGRESS;

                uhci_remove_tds_from_frame(uhci, qh->iso_frame);

                ctrlstat = td_status(td);
                if (ctrlstat & TD_CTRL_ACTIVE) {
                        status = -EXDEV;        /* TD was added too late? */
                } else {
                        status = uhci_map_status(uhci_status_bits(ctrlstat),
                                        usb_pipeout(urb->pipe));
                        actlength = uhci_actual_length(ctrlstat);

                        urb->actual_length += actlength;
                        qh->iso_packet_desc->actual_length = actlength;
                        qh->iso_packet_desc->status = status;
                }
                if (status)
                        urb->error_count++;

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
                qh->iso_frame += qh->period;
                ++qh->iso_packet_desc;
        }
        return 0;
}
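/*
 * hc_driver entry point: queue an URB, allocating the endpoint's QH on
 * first use and dispatching to the submit routine for the endpoint type.
 */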
static int uhci_urb_enqueue(struct usb_hcd *hcd,
                struct urb *urb, gfp_t mem_flags)
{
        int ret;
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb_priv *urbp;
        struct uhci_qh *qh;

        spin_lock_irqsave(&uhci->lock, flags);

        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto done_not_linked;

        ret = -ENOMEM;
        urbp = uhci_alloc_urb_priv(uhci, urb);
        if (!urbp)
                goto done;

        if (urb->ep->hcpriv)
                qh = urb->ep->hcpriv;
        else {
                qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
                if (!qh)
                        goto err_no_qh;
        }
        urbp->qh = qh;

        switch (qh->type) {
        case USB_ENDPOINT_XFER_CONTROL:
                ret = uhci_submit_control(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_BULK:
                ret = uhci_submit_bulk(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_INT:
                ret = uhci_submit_interrupt(uhci, urb, qh);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                urb->error_count = 0;
                ret = uhci_submit_isochronous(uhci, urb, qh);
                break;
        }
        if (ret != 0)
                goto err_submit_failed;

        /* Add this URB to the QH */
        list_add_tail(&urbp->node, &qh->queue);

        /* If the new URB is the first and only one on this QH then either
         * the QH is new and idle or else it's unlinked and waiting to
         * become idle, so we can activate it right away. But only if the
         * queue isn't stopped. */
        if (qh->queue.next == &urbp->node && !qh->is_stopped) {
                uhci_activate_qh(uhci, qh);
                uhci_urbp_wants_fsbr(uhci, urbp);
        }
        goto done;

err_submit_failed:
        if (qh->state == QH_STATE_IDLE)
                uhci_make_qh_idle(uhci, qh);    /* Reclaim unused QH */
err_no_qh:
        uhci_free_urb_priv(uhci, urbp);
done:
        if (ret)
                usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return ret;
}
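/*
 * hc_driver entry point: cancel an URB. The QH is merely unlinked here;
 * the URB itself is given back later, from uhci_scan_qh().
 */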
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct uhci_qh *qh;
        int rc;

        spin_lock_irqsave(&uhci->lock, flags);
        rc = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (rc)
                goto done;

        qh = ((struct urb_priv *) urb->hcpriv)->qh;

        /* Remove Isochronous TDs from the frame list ASAP */
        if (qh->type == USB_ENDPOINT_XFER_ISOC) {
                uhci_unlink_isochronous_tds(uhci, urb);
                mb();

                /* If the URB has already started, update the QH unlink time */
                uhci_get_current_frame_number(uhci);
                if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
                        qh->unlink_frame = uhci->frame_number;
        }

        uhci_unlink_qh(uhci, qh);

done:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return rc;
}
/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
                struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

                /* Subtract off the length of the SETUP packet from
                 * urb->actual_length.
                 */
                urb->actual_length -= min_t(u32, 8, urb->actual_length);
        }

        /* When giving back the first URB in an Isochronous queue,
         * reinitialize the QH's iso-related members for the next URB. */
        else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
                        urbp->node.prev == &qh->queue &&
                        urbp->node.next != &qh->queue) {
                struct urb *nurb = list_entry(urbp->node.next,
                                struct urb_priv, node)->urb;

                qh->iso_packet_desc = &nurb->iso_frame_desc[0];
                qh->iso_frame = nurb->start_frame;
        }

        /* Take the URB off the QH's queue. If the queue is now empty,
         * this is a perfect time for a toggle fixup. */
        list_del_init(&urbp->node);
        if (list_empty(&qh->queue) && qh->needs_fixup) {
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                                usb_pipeout(urb->pipe), qh->initial_toggle);
                qh->needs_fixup = 0;
        }

        uhci_free_urb_priv(uhci, urbp);
        usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

        spin_unlock(&uhci->lock);
        usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
        spin_lock(&uhci->lock);

        /* If the queue is now empty, we can unlink the QH and give up its
         * reserved bandwidth. */
        if (list_empty(&qh->queue)) {
                uhci_unlink_qh(uhci, qh);
                if (qh->bandwidth_reserved)
                        uhci_release_bandwidth(uhci, qh);
        }
}
/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)                       \
                (qh->state == QH_STATE_UNLINKING &&     \
                uhci->frame_number + uhci->is_stopped != qh->unlink_frame)

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct urb_priv *urbp;
        struct urb *urb;
        int status;

        while (!list_empty(&qh->queue)) {
                urbp = list_entry(qh->queue.next, struct urb_priv, node);
                urb = urbp->urb;

                if (qh->type == USB_ENDPOINT_XFER_ISOC)
                        status = uhci_result_isochronous(uhci, urb);
                else
                        status = uhci_result_common(uhci, urb);
                if (status == -EINPROGRESS)
                        break;

                /* Dequeued but completed URBs can't be given back unless
                 * the QH is stopped or has finished unlinking. */
                if (urb->unlinked) {
                        if (QH_FINISHED_UNLINKING(qh))
                                qh->is_stopped = 1;
                        else if (!qh->is_stopped)
                                return;
                }

                uhci_giveback_urb(uhci, qh, urb, status);
                if (status < 0)
                        break;
        }

        /* If the QH is neither stopped nor finished unlinking (normal case),
         * our work here is done. */
        if (QH_FINISHED_UNLINKING(qh))
                qh->is_stopped = 1;
        else if (!qh->is_stopped)
                return;

        /* Otherwise give back each of the dequeued URBs */
restart:
        list_for_each_entry(urbp, &qh->queue, node) {
                urb = urbp->urb;
                if (urb->unlinked) {

                        /* Fix up the TD links and save the toggles for
                         * non-Isochronous queues. For Isochronous queues,
                         * test for too-recent dequeues. */
                        if (!uhci_cleanup_queue(uhci, qh, urb)) {
                                qh->is_stopped = 0;
                                return;
                        }
                        uhci_giveback_urb(uhci, qh, urb, 0);
                        goto restart;
                }
        }
        qh->is_stopped = 0;

        /* There are no more dequeued URBs. If there are still URBs on the
         * queue, the QH can now be re-activated. */
        if (!list_empty(&qh->queue)) {
                if (qh->needs_fixup)
                        uhci_fixup_toggles(qh, 0);

                /* If the first URB on the queue wants FSBR but its time
                 * limit has expired, set the next TD to interrupt on
                 * completion before reactivating the QH. */
                urbp = list_entry(qh->queue.next, struct urb_priv, node);
                if (urbp->fsbr && qh->wait_expired) {
                        struct uhci_td *td = list_entry(urbp->td_list.next,
                                        struct uhci_td, list);

                        td->status |= __cpu_to_le32(TD_CTRL_IOC);
                }

                uhci_activate_qh(uhci, qh);
        }

        /* The queue is empty. The QH can become idle if it is fully
         * unlinked. */
        else if (QH_FINISHED_UNLINKING(qh))
                uhci_make_qh_idle(uhci, qh);
}
/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully. The queue remains
 * stuck on the inactive completed TD. We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct urb_priv *urbp = NULL;
        struct uhci_td *td;
        int ret = 1;
        unsigned status;

        if (qh->type == USB_ENDPOINT_XFER_ISOC)
                goto done;

        /* Treat an UNLINKING queue as though it hasn't advanced.
         * This is okay because reactivation will treat it as though
         * it has advanced, and if it is going to become IDLE then
         * this doesn't matter anyway. Furthermore it's possible
         * for an UNLINKING queue not to have any URBs at all, or
         * for its first URB not to have any TDs (if it was dequeued
         * just as it completed). So it's not easy in any case to
         * test whether such queues have advanced. */
        if (qh->state != QH_STATE_ACTIVE) {
                urbp = NULL;
                status = 0;

        } else {
                urbp = list_entry(qh->queue.next, struct urb_priv, node);
                td = list_entry(urbp->td_list.next, struct uhci_td, list);
                status = td_status(td);
                if (!(status & TD_CTRL_ACTIVE)) {

                        /* We're okay, the queue has advanced */
                        qh->wait_expired = 0;
                        qh->advance_jiffies = jiffies;
                        goto done;
                }
                ret = uhci->is_stopped;
        }

        /* The queue hasn't advanced; check for timeout */
        if (qh->wait_expired)
                goto done;

        if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

                /* Detect the Intel bug and work around it */
                if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
                        qh->element = qh->post_td->link;
                        qh->advance_jiffies = jiffies;
                        ret = 1;
                        goto done;
                }

                qh->wait_expired = 1;

                /* If the current URB wants FSBR, unlink it temporarily
                 * so that we can safely set the next TD to interrupt on
                 * completion. That way we'll know as soon as the queue
                 * starts moving again. */
                if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
                        uhci_unlink_qh(uhci, qh);

        } else {
                /* Unmoving but not-yet-expired queues keep FSBR alive */
                if (urbp)
                        uhci_urbp_wants_fsbr(uhci, urbp);
        }

done:
        return ret;
}
/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
        int i;
        struct uhci_qh *qh;

        /* Don't allow re-entrant calls */
        if (uhci->scan_in_progress) {
                uhci->need_rescan = 1;
                return;
        }
        uhci->scan_in_progress = 1;
rescan:
        uhci->need_rescan = 0;
        uhci->fsbr_is_wanted = 0;

        uhci_clear_next_interrupt(uhci);
        uhci_get_current_frame_number(uhci);
        uhci->cur_iso_frame = uhci->frame_number;

        /* Go through all the QH queues and process the URBs in each one */
        for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
                uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
                                struct uhci_qh, node);
                while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
                        uhci->next_qh = list_entry(qh->node.next,
                                        struct uhci_qh, node);

                        if (uhci_advance_check(uhci, qh)) {
                                uhci_scan_qh(uhci, qh);
                                if (qh->state == QH_STATE_ACTIVE) {
                                        uhci_urbp_wants_fsbr(uhci,
                                                list_entry(qh->queue.next,
                                                struct urb_priv, node));
                                }
                        }
                }
        }

        uhci->last_iso_frame = uhci->cur_iso_frame;
        if (uhci->need_rescan)
                goto rescan;
        uhci->scan_in_progress = 0;

        if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
                        !uhci->fsbr_expiring) {
                uhci->fsbr_expiring = 1;
                mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
        }

        if (list_empty(&uhci->skel_unlink_qh->node))
                uhci_clear_next_interrupt(uhci);
        else
                uhci_set_next_interrupt(uhci);
}