/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define XHCI_INIT_VALUE 0x0

/* Add verbose debugging later, just print everything for now */
29 void xhci_dbg_regs(struct xhci_hcd
*xhci
)
33 xhci_dbg(xhci
, "// xHCI capability registers at %p:\n",
35 temp
= readl(&xhci
->cap_regs
->hc_capbase
);
36 xhci_dbg(xhci
, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
37 &xhci
->cap_regs
->hc_capbase
, temp
);
38 xhci_dbg(xhci
, "// CAPLENGTH: 0x%x\n",
39 (unsigned int) HC_LENGTH(temp
));
41 xhci_dbg(xhci
, "// HCIVERSION: 0x%x\n",
42 (unsigned int) HC_VERSION(temp
));
45 xhci_dbg(xhci
, "// xHCI operational registers at %p:\n", xhci
->op_regs
);
47 temp
= readl(&xhci
->cap_regs
->run_regs_off
);
48 xhci_dbg(xhci
, "// @%p = 0x%x RTSOFF\n",
49 &xhci
->cap_regs
->run_regs_off
,
50 (unsigned int) temp
& RTSOFF_MASK
);
51 xhci_dbg(xhci
, "// xHCI runtime registers at %p:\n", xhci
->run_regs
);
53 temp
= readl(&xhci
->cap_regs
->db_off
);
54 xhci_dbg(xhci
, "// @%p = 0x%x DBOFF\n", &xhci
->cap_regs
->db_off
, temp
);
55 xhci_dbg(xhci
, "// Doorbell array at %p:\n", xhci
->dba
);
58 static void xhci_print_cap_regs(struct xhci_hcd
*xhci
)
63 xhci_dbg(xhci
, "xHCI capability registers at %p:\n", xhci
->cap_regs
);
65 temp
= readl(&xhci
->cap_regs
->hc_capbase
);
66 hci_version
= HC_VERSION(temp
);
67 xhci_dbg(xhci
, "CAPLENGTH AND HCIVERSION 0x%x:\n",
69 xhci_dbg(xhci
, "CAPLENGTH: 0x%x\n",
70 (unsigned int) HC_LENGTH(temp
));
71 xhci_dbg(xhci
, "HCIVERSION: 0x%x\n", hci_version
);
73 temp
= readl(&xhci
->cap_regs
->hcs_params1
);
74 xhci_dbg(xhci
, "HCSPARAMS 1: 0x%x\n",
76 xhci_dbg(xhci
, " Max device slots: %u\n",
77 (unsigned int) HCS_MAX_SLOTS(temp
));
78 xhci_dbg(xhci
, " Max interrupters: %u\n",
79 (unsigned int) HCS_MAX_INTRS(temp
));
80 xhci_dbg(xhci
, " Max ports: %u\n",
81 (unsigned int) HCS_MAX_PORTS(temp
));
83 temp
= readl(&xhci
->cap_regs
->hcs_params2
);
84 xhci_dbg(xhci
, "HCSPARAMS 2: 0x%x\n",
86 xhci_dbg(xhci
, " Isoc scheduling threshold: %u\n",
87 (unsigned int) HCS_IST(temp
));
88 xhci_dbg(xhci
, " Maximum allowed segments in event ring: %u\n",
89 (unsigned int) HCS_ERST_MAX(temp
));
91 temp
= readl(&xhci
->cap_regs
->hcs_params3
);
92 xhci_dbg(xhci
, "HCSPARAMS 3 0x%x:\n",
94 xhci_dbg(xhci
, " Worst case U1 device exit latency: %u\n",
95 (unsigned int) HCS_U1_LATENCY(temp
));
96 xhci_dbg(xhci
, " Worst case U2 device exit latency: %u\n",
97 (unsigned int) HCS_U2_LATENCY(temp
));
99 temp
= readl(&xhci
->cap_regs
->hcc_params
);
100 xhci_dbg(xhci
, "HCC PARAMS 0x%x:\n", (unsigned int) temp
);
101 xhci_dbg(xhci
, " HC generates %s bit addresses\n",
102 HCC_64BIT_ADDR(temp
) ? "64" : "32");
103 xhci_dbg(xhci
, " HC %s Contiguous Frame ID Capability\n",
104 HCC_CFC(temp
) ? "has" : "hasn't");
105 xhci_dbg(xhci
, " HC %s generate Stopped - Short Package event\n",
106 HCC_SPC(temp
) ? "can" : "can't");
108 xhci_dbg(xhci
, " FIXME: more HCCPARAMS debugging\n");
110 temp
= readl(&xhci
->cap_regs
->run_regs_off
);
111 xhci_dbg(xhci
, "RTSOFF 0x%x:\n", temp
& RTSOFF_MASK
);
113 /* xhci 1.1 controllers have the HCCPARAMS2 register */
114 if (hci_version
> 0x100) {
115 temp
= readl(&xhci
->cap_regs
->hcc_params2
);
116 xhci_dbg(xhci
, "HCC PARAMS2 0x%x:\n", (unsigned int) temp
);
117 xhci_dbg(xhci
, " HC %s Force save context capability",
118 HCC2_FSC(temp
) ? "supports" : "doesn't support");
119 xhci_dbg(xhci
, " HC %s Large ESIT Payload Capability",
120 HCC2_LEC(temp
) ? "supports" : "doesn't support");
121 xhci_dbg(xhci
, " HC %s Extended TBC capability",
122 HCC2_ETC(temp
) ? "supports" : "doesn't support");
126 static void xhci_print_command_reg(struct xhci_hcd
*xhci
)
130 temp
= readl(&xhci
->op_regs
->command
);
131 xhci_dbg(xhci
, "USBCMD 0x%x:\n", temp
);
132 xhci_dbg(xhci
, " HC is %s\n",
133 (temp
& CMD_RUN
) ? "running" : "being stopped");
134 xhci_dbg(xhci
, " HC has %sfinished hard reset\n",
135 (temp
& CMD_RESET
) ? "not " : "");
136 xhci_dbg(xhci
, " Event Interrupts %s\n",
137 (temp
& CMD_EIE
) ? "enabled " : "disabled");
138 xhci_dbg(xhci
, " Host System Error Interrupts %s\n",
139 (temp
& CMD_HSEIE
) ? "enabled " : "disabled");
140 xhci_dbg(xhci
, " HC has %sfinished light reset\n",
141 (temp
& CMD_LRESET
) ? "not " : "");
144 static void xhci_print_status(struct xhci_hcd
*xhci
)
148 temp
= readl(&xhci
->op_regs
->status
);
149 xhci_dbg(xhci
, "USBSTS 0x%x:\n", temp
);
150 xhci_dbg(xhci
, " Event ring is %sempty\n",
151 (temp
& STS_EINT
) ? "not " : "");
152 xhci_dbg(xhci
, " %sHost System Error\n",
153 (temp
& STS_FATAL
) ? "WARNING: " : "No ");
154 xhci_dbg(xhci
, " HC is %s\n",
155 (temp
& STS_HALT
) ? "halted" : "running");
158 static void xhci_print_op_regs(struct xhci_hcd
*xhci
)
160 xhci_dbg(xhci
, "xHCI operational registers at %p:\n", xhci
->op_regs
);
161 xhci_print_command_reg(xhci
);
162 xhci_print_status(xhci
);
165 static void xhci_print_ports(struct xhci_hcd
*xhci
)
167 __le32 __iomem
*addr
;
170 char *names
[NUM_PORT_REGS
] = {
177 ports
= HCS_MAX_PORTS(xhci
->hcs_params1
);
178 addr
= &xhci
->op_regs
->port_status_base
;
179 for (i
= 0; i
< ports
; i
++) {
180 for (j
= 0; j
< NUM_PORT_REGS
; ++j
) {
181 xhci_dbg(xhci
, "%p port %s reg = 0x%x\n",
183 (unsigned int) readl(addr
));
189 void xhci_print_ir_set(struct xhci_hcd
*xhci
, int set_num
)
191 struct xhci_intr_reg __iomem
*ir_set
= &xhci
->run_regs
->ir_set
[set_num
];
196 addr
= &ir_set
->irq_pending
;
198 if (temp
== XHCI_INIT_VALUE
)
201 xhci_dbg(xhci
, " %p: ir_set[%i]\n", ir_set
, set_num
);
203 xhci_dbg(xhci
, " %p: ir_set.pending = 0x%x\n", addr
,
206 addr
= &ir_set
->irq_control
;
208 xhci_dbg(xhci
, " %p: ir_set.control = 0x%x\n", addr
,
211 addr
= &ir_set
->erst_size
;
213 xhci_dbg(xhci
, " %p: ir_set.erst_size = 0x%x\n", addr
,
216 addr
= &ir_set
->rsvd
;
218 if (temp
!= XHCI_INIT_VALUE
)
219 xhci_dbg(xhci
, " WARN: %p: ir_set.rsvd = 0x%x\n",
220 addr
, (unsigned int)temp
);
222 addr
= &ir_set
->erst_base
;
223 temp_64
= xhci_read_64(xhci
, addr
);
224 xhci_dbg(xhci
, " %p: ir_set.erst_base = @%08llx\n",
227 addr
= &ir_set
->erst_dequeue
;
228 temp_64
= xhci_read_64(xhci
, addr
);
229 xhci_dbg(xhci
, " %p: ir_set.erst_dequeue = @%08llx\n",
233 void xhci_print_run_regs(struct xhci_hcd
*xhci
)
238 xhci_dbg(xhci
, "xHCI runtime registers at %p:\n", xhci
->run_regs
);
239 temp
= readl(&xhci
->run_regs
->microframe_index
);
240 xhci_dbg(xhci
, " %p: Microframe index = 0x%x\n",
241 &xhci
->run_regs
->microframe_index
,
242 (unsigned int) temp
);
243 for (i
= 0; i
< 7; ++i
) {
244 temp
= readl(&xhci
->run_regs
->rsvd
[i
]);
245 if (temp
!= XHCI_INIT_VALUE
)
246 xhci_dbg(xhci
, " WARN: %p: Rsvd[%i] = 0x%x\n",
247 &xhci
->run_regs
->rsvd
[i
],
248 i
, (unsigned int) temp
);
/* Top-level register dump: capability, operational and port registers. */
void xhci_print_registers(struct xhci_hcd *xhci)
{
	xhci_print_cap_regs(xhci);
	xhci_print_op_regs(xhci);
	xhci_print_ports(xhci);
}
259 void xhci_print_trb_offsets(struct xhci_hcd
*xhci
, union xhci_trb
*trb
)
262 for (i
= 0; i
< 4; ++i
)
263 xhci_dbg(xhci
, "Offset 0x%x = 0x%x\n",
264 i
*4, trb
->generic
.field
[i
]);
268 * Debug a transfer request block (TRB).
270 void xhci_debug_trb(struct xhci_hcd
*xhci
, union xhci_trb
*trb
)
273 u32 type
= le32_to_cpu(trb
->link
.control
) & TRB_TYPE_BITMASK
;
276 case TRB_TYPE(TRB_LINK
):
277 xhci_dbg(xhci
, "Link TRB:\n");
278 xhci_print_trb_offsets(xhci
, trb
);
280 address
= le64_to_cpu(trb
->link
.segment_ptr
);
281 xhci_dbg(xhci
, "Next ring segment DMA address = 0x%llx\n", address
);
283 xhci_dbg(xhci
, "Interrupter target = 0x%x\n",
284 GET_INTR_TARGET(le32_to_cpu(trb
->link
.intr_target
)));
285 xhci_dbg(xhci
, "Cycle bit = %u\n",
286 le32_to_cpu(trb
->link
.control
) & TRB_CYCLE
);
287 xhci_dbg(xhci
, "Toggle cycle bit = %u\n",
288 le32_to_cpu(trb
->link
.control
) & LINK_TOGGLE
);
289 xhci_dbg(xhci
, "No Snoop bit = %u\n",
290 le32_to_cpu(trb
->link
.control
) & TRB_NO_SNOOP
);
292 case TRB_TYPE(TRB_TRANSFER
):
293 address
= le64_to_cpu(trb
->trans_event
.buffer
);
295 * FIXME: look at flags to figure out if it's an address or if
296 * the data is directly in the buffer field.
298 xhci_dbg(xhci
, "DMA address or buffer contents= %llu\n", address
);
300 case TRB_TYPE(TRB_COMPLETION
):
301 address
= le64_to_cpu(trb
->event_cmd
.cmd_trb
);
302 xhci_dbg(xhci
, "Command TRB pointer = %llu\n", address
);
303 xhci_dbg(xhci
, "Completion status = %u\n",
304 GET_COMP_CODE(le32_to_cpu(trb
->event_cmd
.status
)));
305 xhci_dbg(xhci
, "Flags = 0x%x\n",
306 le32_to_cpu(trb
->event_cmd
.flags
));
309 xhci_dbg(xhci
, "Unknown TRB with TRB type ID %u\n",
310 (unsigned int) type
>>10);
311 xhci_print_trb_offsets(xhci
, trb
);
317 * Debug a segment with an xHCI ring.
319 * @return The Link TRB of the segment, or NULL if there is no Link TRB
320 * (which is a bug, since all segments must have a Link TRB).
322 * Prints out all TRBs in the segment, even those after the Link TRB.
324 * XXX: should we print out TRBs that the HC owns? As long as we don't
325 * write, that should be fine... We shouldn't expect that the memory pointed to
326 * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
329 void xhci_debug_segment(struct xhci_hcd
*xhci
, struct xhci_segment
*seg
)
333 union xhci_trb
*trb
= seg
->trbs
;
335 for (i
= 0; i
< TRBS_PER_SEGMENT
; ++i
) {
337 xhci_dbg(xhci
, "@%016llx %08x %08x %08x %08x\n", addr
,
338 lower_32_bits(le64_to_cpu(trb
->link
.segment_ptr
)),
339 upper_32_bits(le64_to_cpu(trb
->link
.segment_ptr
)),
340 le32_to_cpu(trb
->link
.intr_target
),
341 le32_to_cpu(trb
->link
.control
));
342 addr
+= sizeof(*trb
);
346 void xhci_dbg_ring_ptrs(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
)
348 xhci_dbg(xhci
, "Ring deq = %p (virt), 0x%llx (dma)\n",
350 (unsigned long long)xhci_trb_virt_to_dma(ring
->deq_seg
,
352 xhci_dbg(xhci
, "Ring deq updated %u times\n",
354 xhci_dbg(xhci
, "Ring enq = %p (virt), 0x%llx (dma)\n",
356 (unsigned long long)xhci_trb_virt_to_dma(ring
->enq_seg
,
358 xhci_dbg(xhci
, "Ring enq updated %u times\n",
363 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
365 * Print out each segment in the ring. Check that the DMA address in
366 * each link segment actually matches the segment's stored DMA address.
367 * Check that the link end bit is only set at the end of the ring.
368 * Check that the dequeue and enqueue pointers point to real data in this ring
369 * (not some other ring).
371 void xhci_debug_ring(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
)
373 /* FIXME: Throw an error if any segment doesn't have a Link TRB */
374 struct xhci_segment
*seg
;
375 struct xhci_segment
*first_seg
= ring
->first_seg
;
376 xhci_debug_segment(xhci
, first_seg
);
378 if (!ring
->enq_updates
&& !ring
->deq_updates
) {
379 xhci_dbg(xhci
, " Ring has not been updated\n");
382 for (seg
= first_seg
->next
; seg
!= first_seg
; seg
= seg
->next
)
383 xhci_debug_segment(xhci
, seg
);
386 void xhci_dbg_ep_rings(struct xhci_hcd
*xhci
,
387 unsigned int slot_id
, unsigned int ep_index
,
388 struct xhci_virt_ep
*ep
)
391 struct xhci_ring
*ring
;
393 if (ep
->ep_state
& EP_HAS_STREAMS
) {
394 for (i
= 1; i
< ep
->stream_info
->num_streams
; i
++) {
395 ring
= ep
->stream_info
->stream_rings
[i
];
396 xhci_dbg(xhci
, "Dev %d endpoint %d stream ID %d:\n",
397 slot_id
, ep_index
, i
);
398 xhci_debug_segment(xhci
, ring
->deq_seg
);
404 xhci_dbg(xhci
, "Dev %d endpoint ring %d:\n",
406 xhci_debug_segment(xhci
, ring
->deq_seg
);
410 void xhci_dbg_erst(struct xhci_hcd
*xhci
, struct xhci_erst
*erst
)
412 u64 addr
= erst
->erst_dma_addr
;
414 struct xhci_erst_entry
*entry
;
416 for (i
= 0; i
< erst
->num_entries
; ++i
) {
417 entry
= &erst
->entries
[i
];
418 xhci_dbg(xhci
, "@%016llx %08x %08x %08x %08x\n",
420 lower_32_bits(le64_to_cpu(entry
->seg_addr
)),
421 upper_32_bits(le64_to_cpu(entry
->seg_addr
)),
422 le32_to_cpu(entry
->seg_size
),
423 le32_to_cpu(entry
->rsvd
));
424 addr
+= sizeof(*entry
);
428 void xhci_dbg_cmd_ptrs(struct xhci_hcd
*xhci
)
432 val
= xhci_read_64(xhci
, &xhci
->op_regs
->cmd_ring
);
433 xhci_dbg(xhci
, "// xHC command ring deq ptr low bits + flags = @%08x\n",
435 xhci_dbg(xhci
, "// xHC command ring deq ptr high bits = @%08x\n",
439 /* Print the last 32 bytes for 64-byte contexts */
440 static void dbg_rsvd64(struct xhci_hcd
*xhci
, u64
*ctx
, dma_addr_t dma
)
443 for (i
= 0; i
< 4; ++i
) {
444 xhci_dbg(xhci
, "@%p (virt) @%08llx "
445 "(dma) %#08llx - rsvd64[%d]\n",
446 &ctx
[4 + i
], (unsigned long long)dma
,
452 char *xhci_get_slot_state(struct xhci_hcd
*xhci
,
453 struct xhci_container_ctx
*ctx
)
455 struct xhci_slot_ctx
*slot_ctx
= xhci_get_slot_ctx(xhci
, ctx
);
457 switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx
->dev_state
))) {
458 case SLOT_STATE_ENABLED
:
459 return "enabled/disabled";
460 case SLOT_STATE_DEFAULT
:
462 case SLOT_STATE_ADDRESSED
:
464 case SLOT_STATE_CONFIGURED
:
471 static void xhci_dbg_slot_ctx(struct xhci_hcd
*xhci
, struct xhci_container_ctx
*ctx
)
473 /* Fields are 32 bits wide, DMA addresses are in bytes */
474 int field_size
= 32 / 8;
477 struct xhci_slot_ctx
*slot_ctx
= xhci_get_slot_ctx(xhci
, ctx
);
478 dma_addr_t dma
= ctx
->dma
+
479 ((unsigned long)slot_ctx
- (unsigned long)ctx
->bytes
);
480 int csz
= HCC_64BYTE_CONTEXT(xhci
->hcc_params
);
482 xhci_dbg(xhci
, "Slot Context:\n");
483 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
485 (unsigned long long)dma
, slot_ctx
->dev_info
);
487 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
488 &slot_ctx
->dev_info2
,
489 (unsigned long long)dma
, slot_ctx
->dev_info2
);
491 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
493 (unsigned long long)dma
, slot_ctx
->tt_info
);
495 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
496 &slot_ctx
->dev_state
,
497 (unsigned long long)dma
, slot_ctx
->dev_state
);
499 for (i
= 0; i
< 4; ++i
) {
500 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
501 &slot_ctx
->reserved
[i
], (unsigned long long)dma
,
502 slot_ctx
->reserved
[i
], i
);
507 dbg_rsvd64(xhci
, (u64
*)slot_ctx
, dma
);
510 static void xhci_dbg_ep_ctx(struct xhci_hcd
*xhci
,
511 struct xhci_container_ctx
*ctx
,
512 unsigned int last_ep
)
515 int last_ep_ctx
= 31;
516 /* Fields are 32 bits wide, DMA addresses are in bytes */
517 int field_size
= 32 / 8;
518 int csz
= HCC_64BYTE_CONTEXT(xhci
->hcc_params
);
521 last_ep_ctx
= last_ep
+ 1;
522 for (i
= 0; i
< last_ep_ctx
; ++i
) {
523 unsigned int epaddr
= xhci_get_endpoint_address(i
);
524 struct xhci_ep_ctx
*ep_ctx
= xhci_get_ep_ctx(xhci
, ctx
, i
);
525 dma_addr_t dma
= ctx
->dma
+
526 ((unsigned long)ep_ctx
- (unsigned long)ctx
->bytes
);
528 xhci_dbg(xhci
, "%s Endpoint %02d Context (ep_index %02d):\n",
529 usb_endpoint_out(epaddr
) ? "OUT" : "IN",
530 epaddr
& USB_ENDPOINT_NUMBER_MASK
, i
);
531 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
533 (unsigned long long)dma
, ep_ctx
->ep_info
);
535 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
537 (unsigned long long)dma
, ep_ctx
->ep_info2
);
539 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
541 (unsigned long long)dma
, ep_ctx
->deq
);
543 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
545 (unsigned long long)dma
, ep_ctx
->tx_info
);
547 for (j
= 0; j
< 3; ++j
) {
548 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
549 &ep_ctx
->reserved
[j
],
550 (unsigned long long)dma
,
551 ep_ctx
->reserved
[j
], j
);
556 dbg_rsvd64(xhci
, (u64
*)ep_ctx
, dma
);
560 void xhci_dbg_ctx(struct xhci_hcd
*xhci
,
561 struct xhci_container_ctx
*ctx
,
562 unsigned int last_ep
)
565 /* Fields are 32 bits wide, DMA addresses are in bytes */
566 int field_size
= 32 / 8;
567 dma_addr_t dma
= ctx
->dma
;
568 int csz
= HCC_64BYTE_CONTEXT(xhci
->hcc_params
);
570 if (ctx
->type
== XHCI_CTX_TYPE_INPUT
) {
571 struct xhci_input_control_ctx
*ctrl_ctx
=
572 xhci_get_input_control_ctx(ctx
);
574 xhci_warn(xhci
, "Could not get input context, bad type.\n");
578 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
579 &ctrl_ctx
->drop_flags
, (unsigned long long)dma
,
580 ctrl_ctx
->drop_flags
);
582 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
583 &ctrl_ctx
->add_flags
, (unsigned long long)dma
,
584 ctrl_ctx
->add_flags
);
586 for (i
= 0; i
< 6; ++i
) {
587 xhci_dbg(xhci
, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
588 &ctrl_ctx
->rsvd2
[i
], (unsigned long long)dma
,
589 ctrl_ctx
->rsvd2
[i
], i
);
594 dbg_rsvd64(xhci
, (u64
*)ctrl_ctx
, dma
);
597 xhci_dbg_slot_ctx(xhci
, ctx
);
598 xhci_dbg_ep_ctx(xhci
, ctx
, last_ep
);
601 void xhci_dbg_trace(struct xhci_hcd
*xhci
, void (*trace
)(struct va_format
*),
602 const char *fmt
, ...)
604 struct va_format vaf
;
610 xhci_dbg(xhci
, "%pV\n", &vaf
);
614 EXPORT_SYMBOL_GPL(xhci_dbg_trace
);