/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout has
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

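/*
 * A minimal usage sketch (hypothetical timeout of 16 ms): a caller that
 * wants to wait for the HC to report halted would poll the status
 * register like this, which is what xhci_halt() below does with
 * XHCI_MAX_HALT_USEC:
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, 16 * 1000);
 */
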
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	xhci_dbg(xhci, "// Halt the HC\n");
	/* Disable all interrupts from the host controller */
	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);

	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

/*
 * Stop the HC from processing the endpoint queues.
 */
static void xhci_quiesce(struct xhci_hcd *xhci)
{
	/*
	 * Queues are per endpoint, so we need to disable an endpoint or slot.
	 *
	 * To disable a slot, we need to insert a disable slot command on the
	 * command ring and ring the doorbell.  This will also free any internal
	 * resources associated with the slot (which might not be what we want).
	 *
	 * A Release Endpoint command sounds better - doesn't free internal HC
	 * memory, but removes the endpoints from the schedule and releases the
	 * bandwidth, disables the doorbells, and clears the endpoint enable
	 * flag.  Usually used prior to a set interface command.
	 *
	 * TODO: Implement after command ring code is done.
	 */
	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
}

/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right?  ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->msix_entries)
		return;

	free_irq(xhci->msix_entries[0].vector, xhci);
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	/* FIXME this should be a delayed service routine that clears the EHB */
	xhci_handle_event(xhci);

	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}

/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (temp == 0xffffffff && temp2 == 0xffffffff)
		goto hw_died;

	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
			xhci_debug_segment(xhci, ring->deq_seg);
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;

	xhci_dbg(xhci, "xhci_run\n");
#if 0	/* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}

	if (doorbell)
		(*doorbell)(xhci);

	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	if (HC_IS_RUNNING(hcd->state))
		xhci_quiesce(xhci);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

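/*
 * For example: endpoint 0x81 (epnum 1, IN) yields index (1 * 2) + 1 - 1 = 2,
 * endpoint 0x02 (epnum 2, OUT) yields index (2 * 2) + 0 - 1 = 3, and the
 * default control endpoint 0 yields index 0 * 2 = 0.
 */
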
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (address 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

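/*
 * For example: ep 1 IN (address 0x81) has endpoint index 2, so
 * xhci_get_endpoint_flag() returns 1 << 3 = 0b1000; with only that bit set
 * in added_ctxs, xhci_last_valid_endpoint(0b1000) = fls(0b1000) - 1 = 3,
 * the device context index of the last valid endpoint.
 */
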
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, const char *func) {
	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}
	if (!udev->slot_id) {
		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
				func);
		return -EINVAL;
	}
	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = EP0_FLAG;
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = SLOT_FLAG;
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		ret = -EINVAL;
	}
exit:
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	u32 temp;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		td = (struct xhci_td *) urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
		kfree(td);
		return ret;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = ep->ring;
	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);
	td = (struct xhci_td *) urb->hcpriv;

	ep->cancels_pending++;
	list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (ep->cancels_pending == 1) {
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	ctrl_ctx->add_flags &= ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	int *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (command) {
		in_ctx = command->in_ctx;
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		in_ctx = virt_dev->in_ctx;
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}

	init_completion(cmd_completion);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
	return xhci_evaluate_context_result(xhci, udev, cmd_status);
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	xhci_zero_in_ctx(xhci, virt_dev);
	/* Free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].ring);
			virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
			virt_dev->eps[i].new_ring = NULL;
		}
	}

	return ret;
}

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = add_flags;
	ctrl_ctx->drop_flags = drop_flags;
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}

void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = addr | deq_state->new_cycle_state;

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}

/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	u32 state;

	if (udev->slot_id == 0)
		return;

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}

/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		spin_lock_irqsave(&xhci->lock, flags);
		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;
}

/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
				xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= DEV_HUB;
	if (tt->multi)
		slot_ctx->dev_info |= DEV_MTT;
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 * E.g. a think_time of 666 ns (8 FS bit times) encodes as 0,
		 * and 2664 ns (32 FS bit times) encodes as 3.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why?
	 * MFINDEX counts 125 us microframes, so shifting right by three
	 * converts it to a 1 ms frame number.
	 */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);