/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort.
 */

static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
}

/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */

static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM(uhci);
}
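
/*
 * Schematically, with FSBR on the tail of the async schedule loops back
 * to its head, so the controller keeps polling the full-speed queues
 * for the remainder of each frame:
 *
 *	... -> last async QH -> skel_term_qh -> first FSBR QH -> ...
 *
 * uhci_fsbr_off() breaks this loop by storing a terminating link
 * pointer in the last async QH, leaving the schedule linear again.
 */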

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}
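
/*
 * fsbr_expiring is rechecked under uhci->lock because
 * uhci_urbp_wants_fsbr() may have cleared it (and deleted the timer)
 * after this handler had already fired; in that case FSBR must stay on.
 */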

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
		u32 status, u32 token, u32 buffer)
{
	td->status = cpu_to_hc32(uhci, status);
	td->token = cpu_to_hc32(uhci, token);
	td->buffer = cpu_to_hc32(uhci, buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(uhci, td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(uhci, td);
		uhci->frame_cpu[framenum] = td;
	}
}
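
/*
 * The wmb() calls above order the two stores: the new TD's link pointer
 * must be visible before the TD itself becomes reachable (through
 * ltd->link or the frame list entry), or the controller could chase a
 * stale link out of the new TD.
 */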

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next,
					 struct uhci_td,
					 fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_zalloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM(uhci);
	qh->link = UHCI_PTR_TERM(uhci);

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = usb_endpoint_type(&hep->desc);
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					usb_endpoint_maxp(&hep->desc))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}
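
/*
 * usb_calc_bus_time() returns an estimated transfer time in nanoseconds;
 * the "/ 1000 + 1" above converts it to microseconds, rounded up, since
 * the bandwidth bookkeeping below works in microseconds per frame.
 */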

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM(uhci))
		goto done;
	qh->element = UHCI_PTR_TERM(uhci);

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(uhci, td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
			int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(uhci, td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= cpu_to_hc32(uhci,
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
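
/*
 * The sentinel value 2 for "toggle" (any value above 1) means the URBs'
 * existing toggles are already known to be correct, so the loop above
 * merely tracks the running toggle instead of rewriting each TD's token.
 */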

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(uhci, qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__hc32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(uhci, qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(uhci, td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__hc32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}
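
/*
 * For example, with phase 1 and period 4 the loop above examines
 * uhci->load[1], load[5], load[9], ... up to MAX_PHASE and returns the
 * largest entry: the worst-case load any frame of that stream would see.
 */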

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}
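
/*
 * A UHCI frame is 1 ms, so 900 us corresponds to the USB 1.1 limit of
 * 90% periodic bandwidth.  For instance, if the busiest frame of the
 * chosen phase already carries 850 us of load and the new QH needs
 * 60 us, then 850 + 60 > 900 and the request fails with -ENOSPC.
 */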

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(uhci, td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)		/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {	/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)		/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)		/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)		/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__hc32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(uhci, td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
			destination | uhci_explen(pktsze), data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
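
/*
 * The resulting TD chain for a control URB looks roughly like this (the
 * old dummy TD becomes the SETUP TD, and a fresh inactive dummy ends the
 * queue so later URBs can be appended without stopping the controller):
 *
 *	SETUP(DATA0) -> DATA1 -> DATA0 -> ... -> STATUS(DATA1, IOC) -> dummy
 */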

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
	int len = urb->transfer_buffer_length;
	int this_sg_len;
	dma_addr_t data;
	__hc32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;
	struct scatterlist *sg;
	int i;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	i = urb->num_mapped_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		data = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		data = urb->transfer_dma;
		this_sg_len = len;
	}
	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	for (;;) {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(uhci, td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		toggle ^= 1;
		data += pktsze;
		this_sg_len -= pktsze;
		len -= maxsze;
		if (this_sg_len <= 0) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			data = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}
	}

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(uhci, td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(uhci, td);

	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
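
/*
 * The loop above subtracts maxsze rather than pktsze from len, so a
 * short final packet drives len negative while an exact multiple of
 * maxsze leaves len == 0; the URB_ZERO_PACKET test relies on this
 * distinction to decide whether a trailing zero-length packet is needed.
 */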

static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;

		/* If the slot is full, try a lower period */
		do {
			qh->period = 1 << exponent;
			qh->skel = SKEL_INDEX(exponent);

			/* For now, interrupt phase is fixed by the layout
			 * of the QH lists.
			 */
			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
			ret = uhci_check_bandwidth(uhci, qh);
		} while (ret != 0 && --exponent >= 0);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}
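
/*
 * For example, an URB with urb->interval == 10 selects exponent 3 (the
 * largest power of two not above the interval), so the QH runs with a
 * period of 8 frames at phase (8 / 2) & (MAX_PHASE - 1) == 4; on
 * success urb->interval is rewritten to the period actually used.
 */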

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(uhci, td);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle =
			uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
		uhci_fixup_toggles(uhci, qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(uhci, td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(uhci, td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__func__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
						ERRBUF_LEN - EXTRA_SPACE, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(uhci, td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM(uhci);
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i;
	unsigned frame, next;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	uhci_get_current_frame_number(uhci);

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		qh->phase = -1;		/* Find the best phase */
		i = uhci_check_bandwidth(uhci, qh);
		if (i)
			return i;

		/* Allow a little time to allocate the TDs */
		next = uhci->frame_number + 10;
		frame = qh->phase;

		/* Round up to the first available slot */
		frame += (next - frame + qh->period - 1) & -qh->period;
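
		/*
		 * The expression above rounds the offset (next - frame)
		 * up to a whole multiple of qh->period: "& -period"
		 * clears the low-order bits, which is valid because the
		 * USB core rounds periodic intervals to a power of two
		 * before submission.
		 */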

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {
		next = uhci->frame_number + 1;

		/* Find the next unused frame */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}

		/* Fell behind? */
		if (!uhci_frame_before_eq(next, frame)) {

			/* URB_ISO_ASAP: Round up to the first available slot */
			if (urb->transfer_flags & URB_ISO_ASAP)
				frame += (next - frame + qh->period - 1) &
						-qh->period;

			/*
			 * Not ASAP: Use the next slot in the stream,
			 * no matter what.
			 */
			else if (!uhci_frame_before_eq(next,
					frame + (urb->number_of_packets - 1) *
						qh->period))
				dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
						urb, frame,
						(urb->number_of_packets - 1) *
							qh->period,
						next);
		}
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			frame + urb->number_of_packets * urb->interval))
		return -EFBIG;
	urb->start_frame = frame;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(uhci, td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
	}

	qh->skel = SKEL_ISO;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(uhci, td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct uhci_qh *qh;
	int rc;

	spin_lock_irqsave(&uhci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	qh = ((struct urb_priv *) urb->hcpriv)->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return rc;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* Subtract off the length of the SETUP packet from
		 * urb->actual_length.
		 */
		urb->actual_length -= min_t(u32, 8, urb->actual_length);
	}

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}
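
/*
 * uhci->lock is dropped around usb_hcd_giveback_urb() because the URB's
 * completion handler may resubmit and re-enter this driver; the
 * __releases/__acquires annotations above tell sparse about the
 * temporary unlock.
 */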

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
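
/*
 * A QH has finished unlinking once at least one frame boundary has
 * passed since uhci_unlink_qh() recorded qh->unlink_frame, so the
 * controller can no longer be fetching it.  Adding uhci->is_stopped
 * makes a stopped controller count as having advanced a frame.
 */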

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(uhci, qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(uhci, td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = uhci->is_stopped;
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) ==
				LINK_TO_TD(uhci, qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
						list_entry(qh->queue.next,
							struct urb_priv,
							node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}