/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/

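/* A rough sketch of the lists described above (illustrative only; the
 * authoritative layout is struct ehci_qh / struct ehci_qtd in ehci.h):
 *
 *	ehci->async: head QH of the async (control/bulk) ring
 *		-> qh (one per endpoint) -> qh -> ... -> back to the head
 *	each qh->qtd_list: qtd -> qtd -> ... -> qh->dummy (inactive)
 *	each qtd: token plus up to five 4 KB buffer pointers (hw_buf[0..4])
 */
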
/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf += 0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}

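/* Worked example of the arithmetic above (illustrative numbers only): a
 * bulk OUT transfer with maxpacket 512 and len 0x5000 (20KB), whose buffer
 * starts 0xe00 bytes into a page.  The first pointer covers 0x200 bytes,
 * the next four cover 0x1000 each, so count reaches 0x4200.  Since
 * count != len, count is trimmed back to a packet boundary (0x4200 is
 * already a multiple of 512), qtd_fill() returns 0x4200, and the caller
 * queues one more qtd for the remaining 0xe00 bytes.
 */
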
/*-------------------------------------------------------------------------*/

static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
		unsigned	is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
		epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd	*qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/

static int qtd_copy_status (
	struct ehci_hcd	*ehci,
	struct urb	*urb,
	size_t		length,
	u32		token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR (token))
				status = -EPIPE;
			else {
				ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint (urb->pipe),
					usb_pipein (urb->pipe) ? "in" : "out");
				status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR (token))
			status = -EPIPE;
		else	/* unknown */
			status = -EPROTO;

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);

		/* if async CSPLIT failed, try cleaning out the TT buffer */
		if (status != -EPIPE
				&& urb->dev->tt
				&& !usb_pipeint(urb->pipe)
				&& ((token & QTD_STS_MMF) != 0
					|| QTD_CERR(token) == 0)
				&& (!ehci_is_TDI(ehci)
					|| urb->dev->tt->hub !=
					   ehci_to_hcd(ehci)->self.root_hub)) {
			struct usb_device *tt = urb->dev->tt->hub;

			dev_dbg (&tt->dev,
				"clear tt buffer port %d, a%d ep%d t%08x\n",
				urb->dev->ttport, urb->dev->devnum,
				usb_pipeendpoint (urb->pipe), token);

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
			usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
		}
	}

	return status;
}

/* mark the urb done and give it back to its driver */

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		if (likely(status == -EINPROGRESS))
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
	spin_lock (&ehci->lock);
}

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);

static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last = NULL, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status = -EINPROGRESS;
	int			stopped;
	unsigned		count = 0;
	int			do_status = 0;
	u8			state;
	u32			halt = HALT_BIT(ehci);

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;
		int		qtd_status;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {

			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
			break;

		} else {
			stopped = 1;

			if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)))
				last_status = -ESHUTDOWN;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled.  we may patch qh or qtds.
			 */
			if (likely(last_status == -EINPROGRESS &&
					!urb->unlinked))
				continue;

			/* issue status after short control reads */
			if (unlikely (do_status != 0)
					&& QTD_PID (token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== qh->hw_current)
				token = hc32_to_cpu(ehci, qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((halt & qh->hw_token) == 0) {
halt:
				qh->hw_token |= halt;
				wmb ();
			}
		}

		/* remove it from the queue */
		qtd_status = qtd_copy_status(ehci, urb, qtd->length, token);
		if (unlikely(qtd_status == -EREMOTEIO)) {
			do_status = (!urb->unlinked &&
					usb_pipecontrol(urb->pipe));
			qtd_status = 0;
		}
		if (likely(last_status == -EINPROGRESS))
			last_status = qtd_status;

		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del (&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if ((cpu_to_hc32(ehci, QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule (ehci, qh);
				(void) qh_schedule (ehci, qh);
			} else
				unlink_async (ehci, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)

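/* Example of these encodings (illustrative value): a high bandwidth
 * interrupt endpoint reporting wMaxPacketSize 0x1400 means "3 transactions
 * per microframe, 1024 bytes each":
 *
 *	hb_mult(0x1400)    == 1 + ((0x1400 >> 11) & 0x03) == 3
 *	max_packet(0x1400) == 0x1400 & 0x07ff             == 0x400 (1024)
 */
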
/*
 * reverse of qh_urb_transaction: free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, maxpacket;
	int			is_input;
	u32			token;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	buf = urb->transfer_dma;

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
		len -= this_qtd_len;
		buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely (len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out" */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);

	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}

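/* For illustration (not a normative trace): an 18 byte control IN request
 * such as a GET_DESCRIPTOR typically leaves qh_urb_transaction() as three
 * qtds on "head":
 *
 *	qtd 1: SETUP pid, 8 bytes from urb->setup_dma, DATA0
 *	qtd 2: IN pid, 18 bytes into urb->transfer_dma, DATA1
 *	qtd 3: OUT pid, zero length status ack, forced DATA1
 *
 * Only the last qtd normally carries QTD_IOC, so one interrupt covers the
 * whole urb.
 */
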
/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				dbg ("intr period %d uframes, NYET!",
						urb->interval);
				goto done;
			}
		} else {
			int	think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
						is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_hc32(ehci, info1);
	qh->hw_info2 = cpu_to_hc32(ehci, info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}

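/* Example of the dword 1/dword 2 encoding built above (illustrative
 * numbers): a high speed bulk endpoint at device address 5, endpoint 2,
 * ends up with roughly
 *
 *	info1 == (EHCI_TUNE_RL_HS << 28) | (512 << 16) | (2 << 12)
 *			| (2 << 8) | 5
 *	info2 == EHCI_TUNE_MULT_HS << 30
 *
 * i.e. NAK reload count, the fixed 512 byte usb2 bulk maxpacket, EPS
 * "high", endpoint number, device address, and one transaction per
 * microframe.
 */
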
/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
	struct ehci_qh	*head;

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ehci_readl(ehci, &ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ehci, &ehci->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			ehci_writel(ehci, cmd, &ehci->regs->command);
			ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh (ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

/*-------------------------------------------------------------------------*/

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;
	u32			qh_addr_mask = cpu_to_hc32(ehci, 0x7f);

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw_info1 &= ~qh_addr_mask;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__hc32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT(ehci);
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			__list_splice (qtd_list, qh->qtd_list.prev);

			ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(ehci, dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}

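/* Illustration of the dummy-qtd swap above (descriptive, not normative):
 * beforehand the hardware sees "... -> qtdB -> dummy (inactive)" while
 * software holds a new chain "qtd1 -> qtd2".  The old dummy takes over
 * qtd1's contents (still halted), replaces qtd1 at the head of the new
 * chain, and the whole chain is spliced onto qh->qtd_list; qtd1 is then
 * re-initialized as the new dummy and linked after qtd2.  Only at the end
 * is the old dummy's token made active again, so the HC can never fetch a
 * half-built chain.
 */
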
/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	struct ehci_qtd		*qtd;
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags))) {
		rc = -ESHUTDOWN;
		goto done;
	}
	rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(rc))
		goto done;

	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async (ehci, qh_get (qh));
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

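/* Sketch of how the helpers above fit together.  Hedged: the real caller
 * is the urb enqueue path in ehci-hcd.c and may differ in detail; the
 * function name below is hypothetical and the block is compiled out.
 */
#if 0
static int example_enqueue_bulk_or_control(struct ehci_hcd *ehci,
		struct urb *urb, gfp_t mem_flags)
{
	struct list_head	qtd_list;

	INIT_LIST_HEAD (&qtd_list);

	/* build the qtd chain for this urb ... */
	if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
		return -ENOMEM;

	/* ... then append it to the endpoint's QH, linking the QH if idle */
	return submit_async (ehci, urb, &qtd_list, mem_flags);
}
#endif
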
/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed are now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	iaa_watchdog_done(ehci);

	// qh->hw_next = cpu_to_hc32(qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		qh_link_async (ehci, qh);
	else {
		qh_put (qh);		// refcount from async list

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = ehci_readl(ehci, &ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
				&& !ehci->reclaim) {
			/* ... and CMD_IAAD clear */
			ehci_writel(ehci, cmd & ~CMD_ASE,
					&ehci->regs->command);
			wmb ();
			// handshake later, if we need to
			timer_action_done (ehci, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	iaa_watchdog_start(ehci);
}

/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	if (!++(ehci->stamp))
		ehci->stamp++;
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh);
				qh_put (qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty (&qh->qtd_list)) {
				if (qh->stamp == ehci->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!ehci->reclaim
						&& qh->qh_state == QH_STATE_LINKED)
					start_unlink_async (ehci, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}