/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/
/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, and
 * queue multiple urbs (all three types) per endpoint.  URBs may need
 * several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/
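/*
 * Rough sketch of the async schedule these routines maintain:
 *
 *    ehci->async (head QH) -> QH (endpoint) -> QH (endpoint) -> ...
 *         ^                                                      |
 *         +--------------------- circular ----------------------+
 *
 *    each QH -> qtd -> qtd -> ... -> inactive "dummy" qtd
 *
 * A QH's overlay area is the hardware's working copy of the current qtd;
 * new qtds are appended by swapping with the dummy (see qh_append_tds()).
 */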
/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
        size_t len, int token, int maxpacket)
{
    int i, count;
    u64 addr = buf;

    /* one buffer entry per 4K ... first might be short or unaligned */
    qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
    qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
    count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
    if (likely (len < count))		/* ... iff needed */
        count = len;
    else {
        buf += 0x1000;
        buf &= ~0x0fff;

        /* per-qtd limit: from 16K to 20K (best alignment) */
        for (i = 1; count < len && i < 5; i++) {
            addr = buf;
            qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
            qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
                    (u32)(addr >> 32));
            buf += 0x1000;
            if ((count + 0x1000) < len)
                count += 0x1000;
            else
                count = len;
        }

        /* short packets may only terminate transfers */
        if (count != len)
            count -= (count % maxpacket);
    }
    qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
    qtd->length = count;

    return count;
}
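/*
 * Illustrative sketch (not part of the driver): the 4 KiB page arithmetic
 * that qtd_fill() relies on, pulled out as standalone helpers.  The names
 * first_page_room() and qtd_short_cap() are hypothetical.  The first qtd
 * buffer pointer only reaches the end of the page containing the start
 * address; a qtd that cannot hold the whole transfer must end on a
 * maxpacket boundary, since a short packet may only terminate a transfer.
 */
#if 0
static inline unsigned int first_page_room(u64 buf)
{
    /* bytes from buf up to the end of its 4 KiB page */
    return 0x1000 - (buf & 0x0fff);
}

static inline unsigned int qtd_short_cap(unsigned int count, size_t len,
        int maxpacket)
{
    /* a partial (short) packet may only end a transfer */
    if (count != len)
        count -= count % maxpacket;
    return count;
}
#endif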
/*-------------------------------------------------------------------------*/
static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
    struct ehci_qh_hw *hw = qh->hw;

    /* writes to an active overlay are unsafe */
    WARN_ON(qh->qh_state != QH_STATE_IDLE);

    hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
    hw->hw_alt_next = EHCI_LIST_END(ehci);

    /* Except for control endpoints, we make hardware maintain data
     * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
     * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
     * ever clear it.
     */
    if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
        unsigned is_out, epnum;

        is_out = qh->is_out;
        epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
        if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) {
            hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
            usb_settoggle(qh->ps.udev, epnum, is_out, 1);
        }
    }

    hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
    struct ehci_qtd *qtd;

    qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);

    /*
     * first qtd may already be partially processed.
     * If we come here during unlink, the QH overlay region
     * might have reference to the just unlinked qtd.  The
     * qtd is updated in qh_completions().  Update the QH
     * overlay here.
     */
    if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
        qh->hw->hw_qtd_next = qtd->hw_next;
        if (qh->should_be_inactive)
            ehci_warn(ehci, "qh %p should be inactive!\n", qh);
    } else {
        qh_update(ehci, qh, qtd);
    }
    qh->should_be_inactive = 0;
}
/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
        struct usb_host_endpoint *ep)
{
    struct ehci_hcd *ehci = hcd_to_ehci(hcd);
    struct ehci_qh *qh = ep->hcpriv;
    unsigned long flags;

    spin_lock_irqsave(&ehci->lock, flags);
    qh->clearing_tt = 0;
    if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
            && ehci->rh_state == EHCI_RH_RUNNING)
        qh_link_async(ehci, qh);
    spin_unlock_irqrestore(&ehci->lock, flags);
}
static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
        struct urb *urb, u32 token)
{
    /* If an async split transaction gets an error or is unlinked,
     * the TT buffer may be left in an indeterminate state.  We
     * have to clear the TT buffer.
     *
     * Note: this routine is never called for Isochronous transfers.
     */
    if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef CONFIG_DYNAMIC_DEBUG
        struct usb_device *tt = urb->dev->tt->hub;

        dev_dbg(&tt->dev,
            "clear tt buffer port %d, a%d ep%d t%08x\n",
            urb->dev->ttport, urb->dev->devnum,
            usb_pipeendpoint(urb->pipe), token);
#endif /* CONFIG_DYNAMIC_DEBUG */
        if (!ehci_is_TDI(ehci)
                || urb->dev->tt->hub !=
                   ehci_to_hcd(ehci)->self.root_hub) {
            if (usb_hub_clear_tt_buffer(urb) == 0)
                qh->clearing_tt = 1;
        } else {

            /* REVISIT ARC-derived cores don't clear the root
             * hub TT buffer in this way...
             */
        }
    }
}
static int qtd_copy_status (
    struct ehci_hcd *ehci,
    struct urb *urb,
    size_t length,
    u32 token
)
{
    int status = -EINPROGRESS;

    /* count IN/OUT bytes, not SETUP (even short packets) */
    if (likely (QTD_PID (token) != 2))
        urb->actual_length += length - QTD_LENGTH (token);

    /* don't modify error codes */
    if (unlikely(urb->unlinked))
        return status;

    /* force cleanup after short read; not always an error */
    if (unlikely (IS_SHORT_READ (token)))
        status = -EREMOTEIO;

    /* serious "can't proceed" faults reported by the hardware */
    if (token & QTD_STS_HALT) {
        if (token & QTD_STS_BABBLE) {
            /* FIXME "must" disable babbling device's port too */
            status = -EOVERFLOW;
        /* CERR nonzero + halt --> stall */
        } else if (QTD_CERR(token)) {
            status = -EPIPE;

        /* In theory, more than one of the following bits can be set
         * since they are sticky and the transaction is retried.
         * Which to test first is rather arbitrary.
         */
        } else if (token & QTD_STS_MMF) {
            /* fs/ls interrupt xfer missed the complete-split */
            status = -EPROTO;
        } else if (token & QTD_STS_DBE) {
            status = (QTD_PID (token) == 1) /* IN ? */
                ? -ENOSR   /* hc couldn't read data */
                : -ECOMM;  /* hc couldn't write data */
        } else if (token & QTD_STS_XACT) {
            /* timeout, bad CRC, wrong PID, etc */
            ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
                urb->dev->devpath,
                usb_pipeendpoint(urb->pipe),
                usb_pipein(urb->pipe) ? "in" : "out");
            status = -EPROTO;
        } else {	/* unknown */
            status = -EPROTO;
        }
    }

    return status;
}
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
{
    if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
        /* ... update hc-wide periodic stats */
        ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
    }

    if (unlikely(urb->unlinked)) {
        COUNT(ehci->stats.unlink);
    } else {
        /* report non-error and short read status as zero */
        if (status == -EINPROGRESS || status == -EREMOTEIO)
            status = 0;
        COUNT(ehci->stats.complete);
    }

#ifdef EHCI_URB_TRACE
    ehci_dbg (ehci,
        "%s %s urb %p ep%d%s status %d len %d/%d\n",
        __func__, urb->dev->devpath, urb,
        usb_pipeendpoint (urb->pipe),
        usb_pipein (urb->pipe) ? "in" : "out",
        status,
        urb->actual_length, urb->transfer_buffer_length);
#endif

    usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
    usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
}
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns nonzero if the caller should
 * unlink qh.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
    struct ehci_qtd *last, *end = qh->dummy;
    struct list_head *entry, *tmp;
    int last_status;
    int stopped;
    u8 state;
    struct ehci_qh_hw *hw = qh->hw;

    /* completions (or tasks on other cpus) must never clobber HALT
     * till we've gone through and cleaned everything up, even when
     * they add urbs to this qh's queue or mark them for unlinking.
     *
     * NOTE:  unlinking expects to be done in queue order.
     *
     * It's a bug for qh->qh_state to be anything other than
     * QH_STATE_IDLE, unless our caller is scan_async() or
     * scan_intr().
     */
    state = qh->qh_state;
    qh->qh_state = QH_STATE_COMPLETING;
    stopped = (state == QH_STATE_IDLE);

 rescan:
    last = NULL;
    last_status = -EINPROGRESS;
    qh->dequeue_during_giveback = 0;

    /* remove de-activated QTDs from front of queue.
     * after faults (including short reads), cleanup this urb
     * then let the queue advance.
     * if queue is stopped, handles unlinks.
     */
    list_for_each_safe (entry, tmp, &qh->qtd_list) {
        struct ehci_qtd *qtd;
        struct urb *urb;
        u32 token = 0;

        qtd = list_entry (entry, struct ehci_qtd, qtd_list);
        urb = qtd->urb;

        /* clean up any state from previous QTD ...*/
        if (last) {
            if (likely (last->urb != urb)) {
                ehci_urb_done(ehci, last->urb, last_status);
                last_status = -EINPROGRESS;
            }
            ehci_qtd_free (ehci, last);
            last = NULL;
        }

        /* ignore urbs submitted during completions we reported */
        if (qtd == end)
            break;

        /* hardware copies qtd out of qh overlay */
        rmb ();
        token = hc32_to_cpu(ehci, qtd->hw_token);

        /* always clean up qtds the hc de-activated */
 retry_xacterr:
        if ((token & QTD_STS_ACTIVE) == 0) {

            /* Report Data Buffer Error: non-fatal but useful */
            if (token & QTD_STS_DBE)
                ehci_dbg(ehci,
                    "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
                    urb,
                    usb_endpoint_num(&urb->ep->desc),
                    usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
                    urb->transfer_buffer_length,
                    qtd,
                    qh);

            /* on STALL, error, and short reads this urb must
             * complete and all its qtds must be recycled.
             */
            if ((token & QTD_STS_HALT) != 0) {

                /* retry transaction errors until we
                 * reach the software xacterr limit
                 */
                if ((token & QTD_STS_XACT) &&
                        QTD_CERR(token) == 0 &&
                        ++qh->xacterrs < QH_XACTERR_MAX &&
                        !urb->unlinked) {
                    ehci_dbg(ehci,
                        "detected XactErr len %zu/%zu retry %d\n",
                        qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

                    /* reset the token in the qtd and the
                     * qh overlay (which still contains
                     * the qtd) so that we pick up from
                     * where we left off
                     */
                    token &= ~QTD_STS_HALT;
                    token |= QTD_STS_ACTIVE |
                            (EHCI_TUNE_CERR << 10);
                    qtd->hw_token = cpu_to_hc32(ehci,
                            token);
                    wmb();
                    hw->hw_token = cpu_to_hc32(ehci,
                            token);
                    goto retry_xacterr;
                }
                stopped = 1;
                qh->unlink_reason |= QH_UNLINK_HALTED;

            /* magic dummy for some short reads; qh won't advance.
             * that silicon quirk can kick in with this dummy too.
             *
             * other short reads won't stop the queue, including
             * control transfers (status stage handles that) or
             * most other single-qtd reads ... the queue stops if
             * URB_SHORT_NOT_OK was set so the driver submitting
             * the urbs could clean it up.
             */
            } else if (IS_SHORT_READ (token)
                    && !(qtd->hw_alt_next
                        & EHCI_LIST_END(ehci))) {
                stopped = 1;
                qh->unlink_reason |= QH_UNLINK_SHORT_READ;
            }

        /* stop scanning when we reach qtds the hc is using */
        } else if (likely (!stopped
                && ehci->rh_state >= EHCI_RH_RUNNING)) {
            break;

        /* scan the whole queue for unlinks whenever it stops */
        } else {
            stopped = 1;

            /* cancel everything if we halt, suspend, etc */
            if (ehci->rh_state < EHCI_RH_RUNNING) {
                last_status = -ESHUTDOWN;
                qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
            }

            /* this qtd is active; skip it unless a previous qtd
             * for its urb faulted, or its urb was canceled.
             */
            else if (last_status == -EINPROGRESS && !urb->unlinked)
                continue;

            /*
             * If this was the active qtd when the qh was unlinked
             * and the overlay's token is active, then the overlay
             * hasn't been written back to the qtd yet so use its
             * token instead of the qtd's.  After the qtd is
             * processed and removed, the overlay won't be valid
             * any more.
             */
            if (state == QH_STATE_IDLE &&
                    qh->qtd_list.next == &qtd->qtd_list &&
                    (hw->hw_token & ACTIVE_BIT(ehci))) {
                token = hc32_to_cpu(ehci, hw->hw_token);
                hw->hw_token &= ~ACTIVE_BIT(ehci);
                qh->should_be_inactive = 1;

                /* An unlink may leave an incomplete
                 * async transaction in the TT buffer.
                 * We have to clear it.
                 */
                ehci_clear_tt_buffer(ehci, qh, urb, token);
            }
        }

        /* unless we already know the urb's status, collect qtd status
         * and update count of bytes transferred.  in common short read
         * cases with only one data qtd (including control transfers),
         * queue processing won't halt.  but with two or more qtds (for
         * example, with a 32 KB transfer), when the first qtd gets a
         * short read the second must be removed by hand.
         */
        if (last_status == -EINPROGRESS) {
            last_status = qtd_copy_status(ehci, urb,
                    qtd->length, token);
            if (last_status == -EREMOTEIO
                    && (qtd->hw_alt_next
                        & EHCI_LIST_END(ehci)))
                last_status = -EINPROGRESS;

            /* As part of low/full-speed endpoint-halt processing
             * we must clear the TT buffer (11.17.5).
             */
            if (unlikely(last_status != -EINPROGRESS &&
                    last_status != -EREMOTEIO)) {
                /* The TT's in some hubs malfunction when they
                 * receive this request following a STALL (they
                 * stop sending isochronous packets).  Since a
                 * STALL can't leave the TT buffer in a busy
                 * state (if you believe Figures 11-48 - 11-51
                 * in the USB 2.0 spec), we won't clear the TT
                 * buffer in this case.  Strictly speaking this
                 * is a violation of the spec.
                 */
                if (last_status != -EPIPE)
                    ehci_clear_tt_buffer(ehci, qh, urb,
                            token);
            }
        }

        /* if we're removing something not at the queue head,
         * patch the hardware queue pointer.
         */
        if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
            last = list_entry (qtd->qtd_list.prev,
                    struct ehci_qtd, qtd_list);
            last->hw_next = qtd->hw_next;
        }

        /* remove qtd; it's recycled after possible urb completion */
        list_del (&qtd->qtd_list);
        last = qtd;

        /* reinit the xacterr counter for the next qtd */
        qh->xacterrs = 0;
    }

    /* last urb's completion might still need calling */
    if (likely (last != NULL)) {
        ehci_urb_done(ehci, last->urb, last_status);
        ehci_qtd_free (ehci, last);
    }

    /* Do we need to rescan for URBs dequeued during a giveback? */
    if (unlikely(qh->dequeue_during_giveback)) {
        /* If the QH is already unlinked, do the rescan now. */
        if (state == QH_STATE_IDLE)
            goto rescan;

        /* Otherwise the caller must unlink the QH. */
    }

    /* restore original state; caller must unlink or relink */
    qh->qh_state = state;

    /* be sure the hardware's done with the qh before refreshing
     * it after fault cleanup, or recovering from silicon wrongly
     * overlaying the dummy qtd (which reduces DMA chatter).
     *
     * We won't refresh a QH that's linked (after the HC
     * stopped the queue).  That avoids a race:
     *  - HC reads first part of QH;
     *  - CPU updates that first part and the token;
     *  - HC reads rest of that QH, including token
     * Result: HC gets an inconsistent image, and then
     * DMAs to/from the wrong memory (corrupting it).
     *
     * That should be rare for interrupt transfers,
     * except maybe high bandwidth ...
     */
    if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
        qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;

    /* Let the caller know if the QH needs to be unlinked. */
    return qh->unlink_reason;
}
/*-------------------------------------------------------------------------*/

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
    struct ehci_hcd *ehci,
    struct urb *urb,
    struct list_head *qtd_list
) {
    struct list_head *entry, *temp;

    list_for_each_safe (entry, temp, qtd_list) {
        struct ehci_qtd *qtd;

        qtd = list_entry (entry, struct ehci_qtd, qtd_list);
        list_del (&qtd->qtd_list);
        ehci_qtd_free (ehci, qtd);
    }
}
/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
    struct ehci_hcd *ehci,
    struct urb *urb,
    struct list_head *head,
    gfp_t flags
) {
    struct ehci_qtd *qtd, *qtd_prev;
    dma_addr_t buf;
    int len, this_sg_len, maxpacket;
    int is_input;
    u32 token;
    int i;
    struct scatterlist *sg;

    /*
     * URBs map to sequences of QTDs:  one logical transaction
     */
    qtd = ehci_qtd_alloc (ehci, flags);
    if (unlikely (!qtd))
        return NULL;
    list_add_tail (&qtd->qtd_list, head);
    qtd->urb = urb;

    token = QTD_STS_ACTIVE;
    token |= (EHCI_TUNE_CERR << 10);
    /* for split transactions, SplitXState initialized to zero */

    len = urb->transfer_buffer_length;
    is_input = usb_pipein (urb->pipe);
    if (usb_pipecontrol (urb->pipe)) {
        /* SETUP pid */
        qtd_fill(ehci, qtd, urb->setup_dma,
                sizeof (struct usb_ctrlrequest),
                token | (2 /* "setup" */ << 8), 8);

        /* ... and always at least one more pid */
        token ^= QTD_TOGGLE;
        qtd_prev = qtd;
        qtd = ehci_qtd_alloc (ehci, flags);
        if (unlikely (!qtd))
            goto cleanup;
        qtd->urb = urb;
        qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
        list_add_tail (&qtd->qtd_list, head);

        /* for zero length DATA stages, STATUS is always IN */
        if (len == 0)
            token |= (1 /* "in" */ << 8);
    }

    /*
     * data transfer stage:  buffer setup
     */
    i = urb->num_mapped_sgs;
    if (len > 0 && i > 0) {
        sg = urb->sg;
        buf = sg_dma_address(sg);

        /* urb->transfer_buffer_length may be smaller than the
         * size of the scatterlist (or vice versa)
         */
        this_sg_len = min_t(int, sg_dma_len(sg), len);
    } else {
        sg = NULL;
        buf = urb->transfer_dma;
        this_sg_len = len;
    }

    if (is_input)
        token |= (1 /* "in" */ << 8);
    /* else it's already initted to "out" pid (0 << 8) */

    maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);

    /*
     * buffer gets wrapped in one or more qtds;
     * last one may be "short" (including zero len)
     * and may serve as a control status ack
     */
    for (;;) {
        int this_qtd_len;

        this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
                maxpacket);
        this_sg_len -= this_qtd_len;
        len -= this_qtd_len;
        buf += this_qtd_len;

        /*
         * short reads advance to a "magic" dummy instead of the next
         * qtd ... that forces the queue to stop, for manual cleanup.
         * (this will usually be overridden later.)
         */
        qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

        /* qh makes control packets use qtd toggle; maybe switch it */
        if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
            token ^= QTD_TOGGLE;

        if (likely(this_sg_len <= 0)) {
            if (--i <= 0 || len <= 0)
                break;
            sg = sg_next(sg);
            buf = sg_dma_address(sg);
            this_sg_len = min_t(int, sg_dma_len(sg), len);
        }

        qtd_prev = qtd;
        qtd = ehci_qtd_alloc (ehci, flags);
        if (unlikely (!qtd))
            goto cleanup;
        qtd->urb = urb;
        qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
        list_add_tail (&qtd->qtd_list, head);
    }

    /*
     * unless the caller requires manual cleanup after short reads,
     * have the alt_next mechanism keep the queue running after the
     * last data qtd (the only one, for control and most other cases).
     */
    if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
                || usb_pipecontrol (urb->pipe)))
        qtd->hw_alt_next = EHCI_LIST_END(ehci);

    /*
     * control requests may need a terminating data "status" ack;
     * other OUT ones may need a terminating short packet
     * (zero length).
     */
    if (likely (urb->transfer_buffer_length != 0)) {
        int one_more = 0;

        if (usb_pipecontrol (urb->pipe)) {
            one_more = 1;
            token ^= 0x0100;	/* "in" <--> "out" */
            token |= QTD_TOGGLE;	/* force DATA1 */
        } else if (usb_pipeout(urb->pipe)
                && (urb->transfer_flags & URB_ZERO_PACKET)
                && !(urb->transfer_buffer_length % maxpacket)) {
            one_more = 1;
        }
        if (one_more) {
            qtd_prev = qtd;
            qtd = ehci_qtd_alloc (ehci, flags);
            if (unlikely (!qtd))
                goto cleanup;
            qtd->urb = urb;
            qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
            list_add_tail (&qtd->qtd_list, head);

            /* never any data in such packets */
            qtd_fill(ehci, qtd, 0, 0, token, 0);
        }
    }

    /* by default, enable interrupt on urb completion */
    if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
        qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);

    return head;

cleanup:
    qtd_list_free (ehci, urb, head);
    return NULL;
}
/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
    struct ehci_hcd *ehci,
    struct urb *urb,
    gfp_t flags
) {
    struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
    struct usb_host_endpoint *ep;
    u32 info1 = 0, info2 = 0;
    int is_input, type;
    int maxp = 0;
    int mult;
    struct usb_tt *tt = urb->dev->tt;
    struct ehci_qh_hw *hw;

    if (!qh)
        return qh;

    /*
     * init endpoint/device data for this QH
     */
    info1 |= usb_pipeendpoint (urb->pipe) << 8;
    info1 |= usb_pipedevice (urb->pipe) << 0;

    is_input = usb_pipein (urb->pipe);
    type = usb_pipetype (urb->pipe);
    ep = usb_pipe_endpoint (urb->dev, urb->pipe);
    maxp = usb_endpoint_maxp (&ep->desc);
    mult = usb_endpoint_maxp_mult (&ep->desc);

    /* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
     * acts like up to 3KB, but is built from smaller packets.
     */
    if (maxp > 1024) {
        ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
        goto done;
    }

    /* Compute interrupt scheduling parameters just once, and save.
     * - allowing for high bandwidth, how many nsec/uframe are used?
     * - split transactions need a second CSPLIT uframe; same question
     * - splits also need a schedule gap (for full/low speed I/O)
     * - qh has a polling interval
     *
     * For control/bulk requests, the HC or TT handles these.
     */
    if (type == PIPE_INTERRUPT) {
        unsigned tmp;

        qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
                is_input, 0, mult * maxp));
        qh->ps.phase = NO_FRAME;

        if (urb->dev->speed == USB_SPEED_HIGH) {
            if (urb->interval > 1 && urb->interval < 8) {
                /* NOTE interval 2 or 4 uframes could work.
                 * But interval 1 scheduling is simpler, and
                 * includes high bandwidth.
                 */
                urb->interval = 1;
            } else if (urb->interval > ehci->periodic_size << 3) {
                urb->interval = ehci->periodic_size << 3;
            }
            qh->ps.period = urb->interval >> 3;

            /* period for bandwidth allocation */
            tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
                    1 << (urb->ep->desc.bInterval - 1));

            /* Allow urb->interval to override */
            qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
            qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
        } else {
            int think_time;

            /* gap is f(FS/LS transfer times) */
            qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
                    is_input, 0, maxp) / (125 * 1000);

            /* FIXME this just approximates SPLIT/CSPLIT times */
            if (is_input) {		// SPLIT, gap, CSPLIT+DATA
                qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0);
                qh->ps.usecs = HS_USECS(1);
            } else {		// SPLIT+DATA, gap, CSPLIT
                qh->ps.usecs += HS_USECS(1);
                qh->ps.c_usecs = HS_USECS(0);
            }

            think_time = tt ? tt->think_time : 0;
            qh->ps.tt_usecs = NS_TO_US(think_time +
                    usb_calc_bus_time (urb->dev->speed,
                    is_input, 0, maxp));
            if (urb->interval > ehci->periodic_size)
                urb->interval = ehci->periodic_size;
            qh->ps.period = urb->interval;

            /* period for bandwidth allocation */
            tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
                    urb->ep->desc.bInterval);
            tmp = rounddown_pow_of_two(tmp);

            /* Allow urb->interval to override */
            qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
            qh->ps.bw_uperiod = qh->ps.bw_period << 3;
        }
    }

    /* support for tt scheduling, and access to toggles */
    qh->ps.udev = urb->dev;
    qh->ps.ep = urb->ep;

    switch (urb->dev->speed) {
    case USB_SPEED_LOW:
        info1 |= QH_LOW_SPEED;

    case USB_SPEED_FULL:
        /* EPS 0 means "full" */
        if (type != PIPE_INTERRUPT)
            info1 |= (EHCI_TUNE_RL_TT << 28);
        if (type == PIPE_CONTROL) {
            info1 |= QH_CONTROL_EP;		/* for TT */
            info1 |= QH_TOGGLE_CTL;		/* toggle from qtd */
        }
        info1 |= maxp << 16;

        info2 |= (EHCI_TUNE_MULT_TT << 30);

        /* Some Freescale processors have an erratum in which the
         * port number in the queue head was 0..N-1 instead of 1..N.
         */
        if (ehci_has_fsl_portno_bug(ehci))
            info2 |= (urb->dev->ttport-1) << 23;
        else
            info2 |= urb->dev->ttport << 23;

        /* set the address of the TT; for TDI's integrated
         * root hub tt, leave it zeroed.
         */
        if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
            info2 |= tt->hub->devnum << 16;

        /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

        break;

    case USB_SPEED_HIGH:		/* no TT involved */
        info1 |= QH_HIGH_SPEED;
        if (type == PIPE_CONTROL) {
            info1 |= (EHCI_TUNE_RL_HS << 28);
            info1 |= 64 << 16;	/* usb2 fixed maxpacket */
            info1 |= QH_TOGGLE_CTL;	/* toggle from qtd */
            info2 |= (EHCI_TUNE_MULT_HS << 30);
        } else if (type == PIPE_BULK) {
            info1 |= (EHCI_TUNE_RL_HS << 28);
            /* The USB spec says that high speed bulk endpoints
             * always use 512 byte maxpacket.  But some device
             * vendors decided to ignore that, and MSFT is happy
             * to help them do so.  So now people expect to use
             * such nonconformant devices with Linux too; sigh.
             */
            info1 |= maxp << 16;
            info2 |= (EHCI_TUNE_MULT_HS << 30);
        } else {		/* PIPE_INTERRUPT */
            info1 |= maxp << 16;
            info2 |= mult << 30;
        }
        break;
    default:
        ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
                urb->dev->speed);
done:
        qh_destroy(ehci, qh);
        return NULL;
    }

    /* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

    /* init as live, toggle clear */
    qh->qh_state = QH_STATE_IDLE;
    hw = qh->hw;
    hw->hw_info1 = cpu_to_hc32(ehci, info1);
    hw->hw_info2 = cpu_to_hc32(ehci, info2);
    qh->is_out = !is_input;
    usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
    return qh;
}
/*-------------------------------------------------------------------------*/

static void enable_async(struct ehci_hcd *ehci)
{
    if (ehci->async_count++)
        return;

    /* Stop waiting to turn off the async schedule */
    ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);

    /* Don't start the schedule until ASS is 0 */
    ehci_poll_ASS(ehci);
    turn_on_io_watchdog(ehci);
}

static void disable_async(struct ehci_hcd *ehci)
{
    if (--ehci->async_count)
        return;

    /* The async schedule and unlink lists are supposed to be empty */
    WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
            !list_empty(&ehci->async_idle));

    /* Don't turn off the schedule until ASS is 1 */
    ehci_poll_ASS(ehci);
}
/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
    __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
    struct ehci_qh *head;

    /* Don't link a QH if there's a Clear-TT-Buffer pending */
    if (unlikely(qh->clearing_tt))
        return;

    WARN_ON(qh->qh_state != QH_STATE_IDLE);

    /* clear halt and/or toggle; and maybe recover from silicon quirk */
    qh_refresh(ehci, qh);

    /* splice right after start */
    head = ehci->async;
    qh->qh_next = head->qh_next;
    qh->hw->hw_next = head->hw->hw_next;
    wmb();

    head->qh_next.qh = qh;
    head->hw->hw_next = dma;

    qh->qh_state = QH_STATE_LINKED;
    qh->xacterrs = 0;
    qh->unlink_reason = 0;
    /* qtd completions reported later by interrupt */

    enable_async(ehci);
}
/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
    struct ehci_hcd *ehci,
    struct urb *urb,
    struct list_head *qtd_list,
    int epnum,
    void **ptr
)
{
    struct ehci_qh *qh = NULL;
    __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

    qh = (struct ehci_qh *) *ptr;
    if (unlikely (qh == NULL)) {
        /* can't sleep here, we have ehci->lock... */
        qh = qh_make (ehci, urb, GFP_ATOMIC);
        *ptr = qh;
    }
    if (likely (qh != NULL)) {
        struct ehci_qtd *qtd;

        if (unlikely (list_empty (qtd_list)))
            qtd = NULL;
        else
            qtd = list_entry (qtd_list->next, struct ehci_qtd,
                    qtd_list);

        /* control qh may need patching ... */
        if (unlikely (epnum == 0)) {
            /* usb_reset_device() briefly reverts to address 0 */
            if (usb_pipedevice (urb->pipe) == 0)
                qh->hw->hw_info1 &= ~qh_addr_mask;
        }

        /* just one way to queue requests: swap with the dummy qtd.
         * only hc or qh_refresh() ever modify the overlay.
         */
        if (likely (qtd != NULL)) {
            struct ehci_qtd *dummy;
            dma_addr_t dma;
            __hc32 token;

            /* to avoid racing the HC, use the dummy td instead of
             * the first td of our list (becomes new dummy).  both
             * tds stay deactivated until we're done, when the
             * HC is allowed to fetch the old dummy (4.10.2).
             */
            token = qtd->hw_token;
            qtd->hw_token = HALT_BIT(ehci);

            dummy = qh->dummy;

            dma = dummy->qtd_dma;
            *dummy = *qtd;
            dummy->qtd_dma = dma;

            list_del (&qtd->qtd_list);
            list_add (&dummy->qtd_list, qtd_list);
            list_splice_tail(qtd_list, &qh->qtd_list);

            ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
            qh->dummy = qtd;

            /* hc must see the new dummy at list end */
            dma = qtd->qtd_dma;
            qtd = list_entry (qh->qtd_list.prev,
                    struct ehci_qtd, qtd_list);
            qtd->hw_next = QTD_NEXT(ehci, dma);

            /* let the hc process these next qtds */
            wmb ();
            dummy->hw_token = token;

            urb->hcpriv = qh;
        }
    }
    return qh;
}
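/*
 * Summary of the dummy-qtd swap above: the queue always ends in an
 * inactive dummy qtd that the HC may already point at.  New transactions
 * are appended by copying the first new qtd into the old dummy, turning
 * the first new qtd's memory into the new dummy at the end of the list,
 * and only then storing the real token into the old dummy.  That ordering
 * keeps the HC from ever seeing a half-built list.
 */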
/*-------------------------------------------------------------------------*/
static int
submit_async (
    struct ehci_hcd *ehci,
    struct urb *urb,
    struct list_head *qtd_list,
    gfp_t mem_flags
) {
    int epnum;
    unsigned long flags;
    struct ehci_qh *qh = NULL;
    int rc;

    epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
    {
        struct ehci_qtd *qtd;
        qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
        ehci_dbg(ehci,
             "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
             __func__, urb->dev->devpath, urb,
             epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
             urb->transfer_buffer_length,
             qtd, urb->ep->hcpriv);
    }
#endif

    spin_lock_irqsave (&ehci->lock, flags);
    if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
        rc = -ESHUTDOWN;
        goto done;
    }
    rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
    if (unlikely(rc))
        goto done;

    qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
    if (unlikely(qh == NULL)) {
        usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
        rc = -ENOMEM;
        goto done;
    }

    /* Control/bulk operations through TTs don't need scheduling,
     * the HC and TT handle it when the TT has a buffer ready.
     */
    if (likely (qh->qh_state == QH_STATE_IDLE))
        qh_link_async(ehci, qh);
 done:
    spin_unlock_irqrestore (&ehci->lock, flags);
    if (unlikely (qh == NULL))
        qtd_list_free (ehci, urb, qtd_list);
    return rc;
}

/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_HCD_TEST_MODE
/*
 * This function creates the qtds and submits them for the
 * SINGLE_STEP_SET_FEATURE Test.
 * This is done in two parts: first the SETUP req for GetDesc is sent, then
 * 15 seconds later the IN stage for GetDesc starts requesting data from the
 * device.
 *
 * is_setup : input argument deciding which of the two stages is performed;
 * TRUE - SETUP and FALSE - IN+STATUS
 * Returns 0 on success
 */
static int submit_single_step_set_feature(
    struct usb_hcd *hcd,
    struct urb *urb,
    int is_setup
) {
    struct ehci_hcd *ehci = hcd_to_ehci(hcd);
    struct list_head qtd_list;
    struct list_head *head;

    struct ehci_qtd *qtd, *qtd_prev;
    dma_addr_t buf;
    int len, maxpacket;
    u32 token;

    INIT_LIST_HEAD(&qtd_list);
    head = &qtd_list;

    /* URBs map to sequences of QTDs:  one logical transaction */
    qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
    if (unlikely(!qtd))
        return -1;
    list_add_tail(&qtd->qtd_list, head);
    qtd->urb = urb;

    token = QTD_STS_ACTIVE;
    token |= (EHCI_TUNE_CERR << 10);

    len = urb->transfer_buffer_length;
    /*
     * Check if the request is to perform just the SETUP stage (getDesc)
     * as in SINGLE_STEP_SET_FEATURE test, DATA stage (IN) happens
     * 15 secs after the setup
     */
    if (is_setup) {
        /* SETUP pid */
        qtd_fill(ehci, qtd, urb->setup_dma,
                sizeof(struct usb_ctrlrequest),
                token | (2 /* "setup" */ << 8), 8);

        submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
        return 0; /* Return now; we shall come back after 15 seconds */
    }

    /*
     * IN: data transfer stage:  buffer setup : start the IN txn phase for
     * the get_Desc SETUP which was sent 15 seconds back
     */
    token ^= QTD_TOGGLE;	/* We need to start IN with DATA-1 Pid-sequence */
    buf = urb->transfer_dma;

    token |= (1 /* "in" */ << 8);	/* This is IN stage */

    maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);

    qtd_fill(ehci, qtd, buf, len, token, maxpacket);

    /*
     * Our IN phase shall always be a short read; so keep the queue running
     * and let it advance to the next qtd which zero length OUT status
     */
    qtd->hw_alt_next = EHCI_LIST_END(ehci);

    /* STATUS stage for GetDesc control request */
    token ^= 0x0100;	/* "in" <--> "out" */
    token |= QTD_TOGGLE;	/* force DATA1 */

    qtd_prev = qtd;
    qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
    if (unlikely(!qtd))
        goto cleanup;
    qtd->urb = urb;
    qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
    list_add_tail(&qtd->qtd_list, head);

    /* don't fill any data in such packets */
    qtd_fill(ehci, qtd, 0, 0, token, 0);

    /* by default, enable interrupt on urb completion */
    if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
        qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);

    submit_async(ehci, urb, &qtd_list, GFP_KERNEL);

    return 0;

cleanup:
    qtd_list_free(ehci, urb, head);
    return -1;
}
#endif /* CONFIG_USB_HCD_TEST_MODE */
/*-------------------------------------------------------------------------*/

static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
    struct ehci_qh *prev;

    /* Add to the end of the list of QHs waiting for the next IAAD */
    qh->qh_state = QH_STATE_UNLINK_WAIT;
    list_add_tail(&qh->unlink_node, &ehci->async_unlink);

    /* Unlink it from the schedule */
    prev = ehci->async;
    while (prev->qh_next.qh != qh)
        prev = prev->qh_next.qh;

    prev->hw->hw_next = qh->hw->hw_next;
    prev->qh_next = qh->qh_next;
    if (ehci->qh_scan_next == qh)
        ehci->qh_scan_next = qh->qh_next.qh;
}
static void start_iaa_cycle(struct ehci_hcd *ehci)
{
    /* If the controller isn't running, we don't have to wait for it */
    if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
        end_unlink_async(ehci);

    /* Otherwise start a new IAA cycle if one isn't already running */
    } else if (ehci->rh_state == EHCI_RH_RUNNING &&
            !ehci->iaa_in_progress) {

        /* Make sure the unlinks are all visible to the hardware */
        wmb();

        ehci_writel(ehci, ehci->command | CMD_IAAD,
                &ehci->regs->command);
        ehci_readl(ehci, &ehci->regs->command);
        ehci->iaa_in_progress = true;
        ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
    }
}
static void end_iaa_cycle(struct ehci_hcd *ehci)
{
    if (ehci->has_synopsys_hc_bug)
        ehci_writel(ehci, (u32) ehci->async->qh_dma,
                &ehci->regs->async_next);

    /* The current IAA cycle has ended */
    ehci->iaa_in_progress = false;

    end_unlink_async(ehci);
}
/* See if the async qh for the qtds being unlinked are now gone from the HC */

static void end_unlink_async(struct ehci_hcd *ehci)
{
    struct ehci_qh *qh;
    bool early_exit;

    if (list_empty(&ehci->async_unlink))
        return;
    qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
            unlink_node);	/* QH whose IAA cycle just ended */

    /*
     * If async_unlinking is set then this routine is already running,
     * either on the stack or on another CPU.
     */
    early_exit = ehci->async_unlinking;

    /* If the controller isn't running, process all the waiting QHs */
    if (ehci->rh_state < EHCI_RH_RUNNING)
        list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle);

    /*
     * Intel (?) bug: The HC can write back the overlay region even
     * after the IAA interrupt occurs.  In self-defense, always go
     * through two IAA cycles for each QH.
     */
    else if (qh->qh_state == QH_STATE_UNLINK) {
        /*
         * Second IAA cycle has finished.  Process only the first
         * waiting QH (NVIDIA (?) bug).
         */
        list_move_tail(&qh->unlink_node, &ehci->async_idle);
    }

    /*
     * AMD/ATI (?) bug: The HC can continue to use an active QH long
     * after the IAA interrupt occurs.  To prevent problems, QHs that
     * may still be active will wait until 2 ms have passed with no
     * change to the hw_current and hw_token fields (this delay occurs
     * between the two IAA cycles).
     *
     * The EHCI spec (4.8.2) says that active QHs must not be removed
     * from the async schedule and recommends waiting until the QH
     * goes inactive.  This is ridiculous because the QH will _never_
     * become inactive if the endpoint NAKs indefinitely.
     */
    /* Some reasons for unlinking guarantee the QH can't be active */
    else if (qh->unlink_reason & (QH_UNLINK_HALTED |
            QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY))
        goto DelayDone;

    /* The QH can't be active if the queue was and still is empty... */
    else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
            list_empty(&qh->qtd_list))
        goto DelayDone;

    /* ... or if the QH has halted */
    else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
        goto DelayDone;

    /* Otherwise we have to wait until the QH stops changing */
    else {
        __hc32 qh_current, qh_token;

        qh_current = qh->hw->hw_current;
        qh_token = qh->hw->hw_token;
        if (qh_current != ehci->old_current ||
                qh_token != ehci->old_token) {
            ehci->old_current = qh_current;
            ehci->old_token = qh_token;
            ehci_enable_event(ehci,
                    EHCI_HRTIMER_ACTIVE_UNLINK, true);
            return;
        }
 DelayDone:
        qh->qh_state = QH_STATE_UNLINK;
        early_exit = true;
    }
    ehci->old_current = ~0;		/* Prepare for next QH */
    /* Start a new IAA cycle if any QHs are waiting for it */
    if (!list_empty(&ehci->async_unlink))
        start_iaa_cycle(ehci);

    /*
     * Don't allow nesting or concurrent calls,
     * or wait for the second IAA cycle for the next QH.
     */
    if (early_exit)
        return;

    /* Process the idle QHs */
    ehci->async_unlinking = true;
    while (!list_empty(&ehci->async_idle)) {
        qh = list_first_entry(&ehci->async_idle, struct ehci_qh,
                unlink_node);
        list_del(&qh->unlink_node);

        qh->qh_state = QH_STATE_IDLE;
        qh->qh_next.qh = NULL;

        if (!list_empty(&qh->qtd_list))
            qh_completions(ehci, qh);
        if (!list_empty(&qh->qtd_list) &&
                ehci->rh_state == EHCI_RH_RUNNING)
            qh_link_async(ehci, qh);
        disable_async(ehci);
    }
    ehci->async_unlinking = false;
}
static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_empty_async(struct ehci_hcd *ehci)
{
    struct ehci_qh *qh;
    struct ehci_qh *qh_to_unlink = NULL;

    /* Find the last async QH which has been empty for a timer cycle */
    for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
        if (list_empty(&qh->qtd_list) &&
                qh->qh_state == QH_STATE_LINKED) {
            if (qh->unlink_cycle != ehci->async_unlink_cycle)
                qh_to_unlink = qh;
        }
    }

    /* If nothing else is being unlinked, unlink the last empty QH */
    if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
        qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
        start_unlink_async(ehci, qh_to_unlink);
    }

    /* Other QHs will be handled later */
    ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
    ++ehci->async_unlink_cycle;
}
/* The root hub is suspended; unlink all the async QHs */
static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
{
    struct ehci_qh *qh;

    while (ehci->async->qh_next.qh) {
        qh = ehci->async->qh_next.qh;
        WARN_ON(!list_empty(&qh->qtd_list));
        single_unlink_async(ehci, qh);
    }
}
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
    /* If the QH isn't linked then there's nothing we can do. */
    if (qh->qh_state != QH_STATE_LINKED)
        return;

    single_unlink_async(ehci, qh);
    start_iaa_cycle(ehci);
}
/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
    struct ehci_qh *qh;
    bool check_unlinks_later = false;

    ehci->qh_scan_next = ehci->async->qh_next.qh;
    while (ehci->qh_scan_next) {
        qh = ehci->qh_scan_next;
        ehci->qh_scan_next = qh->qh_next.qh;

        /* clean any finished work for this qh */
        if (!list_empty(&qh->qtd_list)) {
            int temp;

            /*
             * Unlinks could happen here; completion reporting
             * drops the lock.  That's why ehci->qh_scan_next
             * always holds the next qh to scan; if the next qh
             * gets unlinked then ehci->qh_scan_next is adjusted
             * in single_unlink_async().
             */
            temp = qh_completions(ehci, qh);
            if (unlikely(temp)) {
                start_unlink_async(ehci, qh);
            } else if (list_empty(&qh->qtd_list)
                    && qh->qh_state == QH_STATE_LINKED) {
                qh->unlink_cycle = ehci->async_unlink_cycle;
                check_unlinks_later = true;
            }
        }
    }

    /*
     * Unlink empty entries, reducing DMA usage as well
     * as HCD schedule-scanning costs.  Delay for any qh
     * we just scanned, there's a not-unusual case that it
     * doesn't stay idle for long.
     */
    if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
            !(ehci->enabled_hrtimer_events &
                BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
        ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
        ++ehci->async_unlink_cycle;