// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbc.c - xHCI debug capability early driver
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/usb/xhci-dbgp.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define	xdbc_trace	trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */

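/*
 * Map BAR0 of the chosen xHCI controller with early_ioremap(). The BAR
 * size is probed with the usual PCI dance: save the BAR, write all ones,
 * read back the size mask, restore the original value. A 64-bit memory
 * BAR takes its upper half from the next BAR dword.
 */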
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}

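/*
 * Early single-page allocator: memory comes straight from memblock and the
 * returned "DMA" address is just the physical address, since this code runs
 * long before the DMA API (or even the page allocator) is available.
 */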
static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
	void *virt;

	virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!virt)
		return NULL;

	if (dma_addr)
		*dma_addr = (dma_addr_t)__pa(virt);

	return virt;
}

static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
	u32 bus, dev, func, class;

	for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
		for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
			for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

				class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
				if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
					continue;

				if (xdbc_num-- != 0)
					continue;

				*b = bus;
				*d = dev;
				*f = func;

				return 0;
			}
		}
	}

	return -1;
}

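/*
 * Poll an MMIO register until (readl(ptr) & mask) == done, checking every
 * 'delay' microseconds for at most 'wait' microseconds. Returns 0 on
 * success and -ETIMEDOUT otherwise.
 */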
static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
	u32 result;

	/* Cannot use readl_poll_timeout_atomic() for early boot things */
	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay);
		wait -= delay;
	} while (wait > 0);

	return -ETIMEDOUT;
}

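/*
 * Claim the host controller from the BIOS through the xHCI legacy support
 * extended capability, then mask the BIOS SMI sources so firmware stops
 * reacting to controller events while the DbC is in use.
 */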
static void __init xdbc_bios_handoff(void)
{
	int offset, timeout;
	u32 val;

	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
	val = readl(xdbc.xhci_base + offset);

	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
		timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

		if (timeout) {
			pr_notice("failed to hand over xHCI control from BIOS\n");
			writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
		}
	}

	/* Disable BIOS SMIs and clear all SMI events: */
	val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
	val &= XHCI_LEGACY_DISABLE_SMI;
	val |= XHCI_LEGACY_SMI_EVENTS;
	writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}

static int __init xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
	seg->trbs = xdbc_get_page(&seg->dma);
	if (!seg->trbs)
		return -ENOMEM;

	ring->segment = seg;

	return 0;
}

static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;

	if (!seg)
		return;

	memblock_phys_free(seg->dma, PAGE_SIZE);
	ring->segment = NULL;
}

static void xdbc_reset_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;
	struct xdbc_trb *link_trb;

	memset(seg->trbs, 0, PAGE_SIZE);

	ring->enqueue = seg->trbs;
	ring->dequeue = seg->trbs;
	ring->cycle_state = 1;

	if (ring != &xdbc.evt_ring) {
		link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
		link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
		link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
		link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
	}
}

static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		s[i] = cpu_to_le16(c[i]);
}

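/*
 * Lay out everything the DbC expects inside the pre-allocated table page:
 * the event ring segment table, the debug capability contexts (info plus
 * bulk IN/OUT endpoint contexts) and the USB string descriptors, then point
 * the DbC registers at those structures.
 */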
static void xdbc_mem_init(void)
{
	struct xdbc_ep_context *ep_in, *ep_out;
	struct usb_string_descriptor *s_desc;
	struct xdbc_erst_entry *entry;
	struct xdbc_strings *strings;
	struct xdbc_context *ctx;
	unsigned int max_burst;
	u32 string_length;
	int index = 0;
	u32 dev_info;

	xdbc_reset_ring(&xdbc.evt_ring);
	xdbc_reset_ring(&xdbc.in_ring);
	xdbc_reset_ring(&xdbc.out_ring);
	memset(xdbc.table_base, 0, PAGE_SIZE);
	memset(xdbc.out_buf, 0, PAGE_SIZE);

	/* Initialize event ring segment table: */
	xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_ERST_ENTRY_NUM;
	entry = (struct xdbc_erst_entry *)xdbc.erst_base;

	entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
	entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
	entry->__reserved_0 = 0;

	/* Initialize ERST registers: */
	writel(1, &xdbc.xdbc_reg->ersts);
	xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
	xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

	/* Debug capability contexts: */
	xdbc.dbcc_size = 64 * 3;
	xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_DBCC_ENTRY_NUM;

	/* Populate the strings: */
	xdbc.string_size = sizeof(struct xdbc_strings);
	xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
	strings = (struct xdbc_strings *)xdbc.string_base;

	index += XDBC_STRING_ENTRY_NUM;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0 descriptor (LANGID 0x0409, US English): */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;

	string_length += 4;

	/* Populate info Context: */
	ctx = (struct xdbc_context *)xdbc.dbcc_base;

	ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
	ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
	ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
	ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
	ctx->info.length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
	ep_out = (struct xdbc_ep_context *)&ctx->out;

	ep_out->ep_info1 = 0;
	ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

	/* Populate bulk in endpoint context: */
	ep_in = (struct xdbc_ep_context *)&ctx->in;

	ep_in->ep_info1 = 0;
	ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

	/* Set DbC context and info registers: */
	xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

	dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
	writel(dev_info, &xdbc.xdbc_reg->devinfo1);

	dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
	writel(dev_info, &xdbc.xdbc_reg->devinfo2);

	xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
	xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}

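/*
 * Port reset helpers, used only for Intel hosts (see xdbc_start()): every
 * disconnected port in each USB3 root-hub port range is reset so that the
 * debug host can connect cleanly instead of hanging the bus.
 */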
static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
	void __iomem *ops_reg;
	void __iomem *portsc;
	u32 val, cap_length;
	int i;

	cap_length = readl(xdbc.xhci_base) & 0xff;
	ops_reg = xdbc.xhci_base + cap_length;

	id--;
	for (i = id; i < (id + count); i++) {
		portsc = ops_reg + 0x400 + i * 0x10;
		val = readl(portsc);
		if (!(val & PORT_CONNECT))
			writel(val | PORT_RESET, portsc);
	}
}

static void xdbc_reset_debug_port(void)
{
	u32 val, port_offset, port_count;
	int offset = 0;

	do {
		offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(xdbc.xhci_base + offset);
		if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
			continue;

		val = readl(xdbc.xhci_base + offset + 8);
		port_offset = XHCI_EXT_PORT_OFF(val);
		port_count = XHCI_EXT_PORT_COUNT(val);

		xdbc_do_reset_debug_port(port_offset, port_count);
	} while (1);
}

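/*
 * Write one TRB at the ring's enqueue pointer. When the enqueue pointer
 * reaches the link TRB at the end of the segment, hand the link TRB over to
 * the hardware by giving it the current cycle bit, wrap back to the start
 * of the segment and toggle the producer cycle state.
 */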
static void xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xdbc_trb *trb, *link_trb;

	trb = ring->enqueue;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	++(ring->enqueue);
	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
		link_trb = ring->enqueue;
		if (ring->cycle_state)
			link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
		else
			link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

		ring->enqueue = ring->segment->trbs;
		ring->cycle_state ^= 1;
	}
}

static void xdbc_ring_doorbell(int target)
{
	writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}

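/*
 * Bring the DbC online: set the enable and port-enable bits, then wait in
 * turn for the enable bit to latch, for a debug host to connect and for the
 * debug device to be configured, before reading back the root-hub port
 * number that is in use.
 */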
static int xdbc_start(void)
{
	u32 ctrl, status;
	int ret;

	ctrl = readl(&xdbc.xdbc_reg->control);
	writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
	if (ret) {
		xdbc_trace("failed to initialize hardware\n");
		return ret;
	}

	/* Reset port to avoid bus hang: */
	if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
		xdbc_reset_debug_port();

	/* Wait for port connection: */
	ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for connection timed out\n");
		return ret;
	}

	/* Wait for debug device to be configured: */
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for device configuration timed out\n");
		return ret;
	}

	/* Check port number: */
	status = readl(&xdbc.xdbc_reg->status);
	if (!DCST_DEBUG_PORT(status)) {
		xdbc_trace("invalid root hub port number\n");
		return -ENODEV;
	}

	xdbc.port_number = DCST_DEBUG_PORT(status);

	xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
		   readl(&xdbc.xdbc_reg->control), xdbc.port_number);

	return 0;
}

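/*
 * Queue a single bulk transfer on the IN or OUT ring. Only one request per
 * direction is kept in flight; data is staged in the fixed in_buf/out_buf
 * pages, and the TRB's cycle bit is flipped last so the hardware never sees
 * a half-written TRB.
 */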
static int xdbc_bulk_transfer(void *data, int size, bool read)
{
	struct xdbc_ring *ring;
	struct xdbc_trb *trb;
	u32 length, control;
	u32 cycle;
	u64 addr;

	if (size > XDBC_MAX_PACKET) {
		xdbc_trace("bad parameter, size %d\n", size);
		return -EINVAL;
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
	    !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
	    (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
	    (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

		xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
		return -EIO;
	}

	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(size);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	if (read) {
		memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
		addr = xdbc.in_dma;
		xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
	} else {
		memcpy_and_pad(xdbc.out_buf, XDBC_MAX_PACKET, data, size, 0);
		addr = xdbc.out_dma;
		xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
	}

	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit; the cycle bit must be written last:
	 */
	wmb();
	if (cycle)
		trb->field[3] |= cpu_to_le32(cycle);
	else
		trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);

	return size;
}

static int xdbc_handle_external_reset(void)
{
	int ret = 0;

	xdbc.flags = 0;
	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
	if (ret)
		goto reset_out;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0)
		goto reset_out;

	xdbc_trace("dbc recovered\n");

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;

reset_out:
	xdbc_trace("failed to recover from external reset\n");
	return ret;
}

static int __init xdbc_early_setup(void)
{
	int ret;

	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
	if (ret)
		return ret;

	/* Allocate the table page: */
	xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
	if (!xdbc.table_base)
		return -ENOMEM;

	/* Get and store the transfer buffer: */
	xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
	if (!xdbc.out_buf)
		return -ENOMEM;

	/* Allocate the event ring: */
	ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
	if (ret < 0)
		return ret;

	/* Allocate IN/OUT endpoint transfer rings: */
	ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
	if (ret < 0)
		return ret;

	ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
	if (ret < 0)
		return ret;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0) {
		writel(0, &xdbc.xdbc_reg->control);
		return ret;
	}

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;
}

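/*
 * Wired up from the "earlyprintk=xdbc<N>" kernel parameter: <N> selects
 * which xHCI controller on the PCI bus to use (the first one if omitted).
 * This only records the controller and maps its registers; the hardware is
 * brought up later by early_xdbc_setup_hardware().
 */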
int __init early_xdbc_parse_parameter(char *s, int keep_early)
{
	unsigned long dbgp_num = 0;
	u32 bus, dev, func, offset;
	char *e;
	int ret;

	if (!early_pci_allowed())
		return -EPERM;

	early_console_keep = keep_early;

	if (*s)
		dbgp_num = simple_strtoul(s, &e, 10);

	pr_notice("dbgp_num: %lu\n", dbgp_num);

	/* Locate the host controller: */
	ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
	if (ret) {
		pr_notice("failed to locate xhci host\n");
		return -ENODEV;
	}

	xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
	xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
	xdbc.bus = bus;
	xdbc.dev = dev;
	xdbc.func = func;

	/* Map the IO memory: */
	xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
	if (!xdbc.xhci_base)
		return -ENOMEM;

	/* Locate DbC registers: */
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!offset) {
		pr_notice("xhci host doesn't support debug capability\n");
		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
		xdbc.xhci_base = NULL;
		xdbc.xhci_length = 0;

		return -ENODEV;
	}
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

	return 0;
}

int __init early_xdbc_setup_hardware(void)
{
	int ret;

	if (!xdbc.xdbc_reg)
		return -ENODEV;

	xdbc_bios_handoff();

	raw_spin_lock_init(&xdbc.lock);

	ret = xdbc_early_setup();
	if (ret) {
		pr_notice("failed to setup the connection to host\n");

		xdbc_free_ring(&xdbc.evt_ring);
		xdbc_free_ring(&xdbc.out_ring);
		xdbc_free_ring(&xdbc.in_ring);

		memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
		memblock_phys_free(xdbc.out_dma, PAGE_SIZE);

		xdbc.table_base = NULL;
		xdbc.out_buf = NULL;
	}

	return ret;
}

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
	u32 port_reg;

	port_reg = readl(&xdbc.xdbc_reg->portsc);
	if (port_reg & PORTSC_CONN_CHANGE) {
		xdbc_trace("connect status change event\n");

		/* Check whether cable unplugged: */
		if (!(port_reg & PORTSC_CONN_STATUS)) {
			xdbc.flags = 0;
			xdbc_trace("cable unplugged\n");
		}
	}

	if (port_reg & PORTSC_RESET_CHANGE)
		xdbc_trace("port reset change event\n");

	if (port_reg & PORTSC_LINK_CHANGE)
		xdbc_trace("port link status change event\n");

	if (port_reg & PORTSC_CONFIG_CHANGE)
		xdbc_trace("config error change\n");

	/* Write back the value to clear RW1C bits: */
	writel(port_reg, &xdbc.xdbc_reg->portsc);
}

static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
	u32 comp_code;
	int ep_id;

	comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

	switch (comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		break;
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
	default:
		if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
		if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
			xdbc.flags |= XDBC_FLAGS_IN_STALL;

		xdbc_trace("endpoint %d stalled\n", ep_id);
		break;
	}

	if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
	} else {
		xdbc_trace("invalid endpoint id %d\n", ep_id);
	}
}

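/*
 * The DbC is driven entirely by polling: re-enable bus mastering if
 * firmware cleared it, recover from external resets, track the
 * configured/stalled state, then walk the event ring and consume port
 * status and transfer events until the cycle bit no longer matches.
 */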
static void xdbc_handle_events(void)
{
	struct xdbc_trb *evt_trb;
	bool update_erdp = false;
	u32 reg;
	u8 cmd;

	cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
		write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return;

	/* Handle external reset events: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (!(reg & CTRL_DBC_ENABLE)) {
		if (xdbc_handle_external_reset()) {
			xdbc_trace("failed to recover connection\n");
			return;
		}
	}

	/* Handle configure-exit event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_DBC_RUN_CHANGE) {
		writel(reg, &xdbc.xdbc_reg->control);
		if (reg & CTRL_DBC_RUN)
			xdbc.flags |= XDBC_FLAGS_CONFIGURED;
		else
			xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
	}

	/* Handle endpoint stall event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_HALT_IN_TR) {
		xdbc.flags |= XDBC_FLAGS_IN_STALL;
	} else {
		xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
		if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
			xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	}

	if (reg & CTRL_HALT_OUT_TR)
		xdbc.flags |= XDBC_FLAGS_OUT_STALL;
	else
		xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

	/* Handle the events in the event ring: */
	evt_trb = xdbc.evt_ring.dequeue;
	while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
		case TRB_TYPE(TRB_PORT_STATUS):
			xdbc_handle_port_status(evt_trb);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			xdbc_handle_tx_event(evt_trb);
			break;
		default:
			break;
		}

		++(xdbc.evt_ring.dequeue);
		if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
			xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
			xdbc.evt_ring.cycle_state ^= 1;
		}

		evt_trb = xdbc.evt_ring.dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp)
		xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}

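/*
 * Synchronous console write: poll for completion of any previous OUT
 * transfer (for up to ~2 seconds), then queue the new one. From NMI context
 * the lock is only trylock'ed so a printk from NMI cannot deadlock against
 * an interrupted writer.
 */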
static int xdbc_bulk_write(const char *bytes, int size)
{
	int ret, timeout = 0;
	unsigned long flags;

retry:
	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
			return -EAGAIN;
	} else {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
	}

	xdbc_handle_events();

	/* Check completion of the previous request: */
	if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		udelay(100);
		timeout += 100;
		goto retry;
	}

	if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		xdbc_trace("previous transfer not completed yet\n");

		return -ETIMEDOUT;
	}

	ret = xdbc_bulk_transfer((void *)bytes, size, false);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	return ret;
}

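/*
 * Console ->write callback: copy the message into a static packet-sized
 * buffer in XDBC_MAX_PACKET chunks, expanding "\n" into "\r\n" on the way,
 * and push each chunk out through xdbc_bulk_write().
 */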
static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
	/* static variables are zeroed, so buf is always NULL terminated */
	static char buf[XDBC_MAX_PACKET + 1];
	int chunk, ret;
	int use_cr = 0;

	if (!xdbc.xdbc_reg)
		return;

	while (n > 0) {
		for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {

			if (!use_cr && *str == '\n') {
				use_cr = 1;
				buf[chunk] = '\r';
				str--;
				n++;
				continue;
			}

			if (use_cr)
				use_cr = 0;
			buf[chunk] = *str;
		}

		if (chunk > 0) {
			ret = xdbc_bulk_write(buf, chunk);
			if (ret < 0)
				xdbc_trace("missed message {%s}\n", buf);
		}
	}
}

static struct console early_xdbc_console = {
	.name		= "earlyxdbc",
	.write		= early_xdbc_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

void __init early_xdbc_register_console(void)
{
	if (early_console)
		return;

	early_console = &early_xdbc_console;
	if (early_console_keep)
		early_console->flags &= ~CON_BOOT;
	else
		early_console->flags |= CON_BOOT;
	register_console(early_console);
}

static void xdbc_unregister_console(void)
{
	if (console_is_registered(&early_xdbc_console))
		unregister_console(&early_xdbc_console);
}

static int xdbc_scrub_function(void *ptr)
{
	unsigned long flags;

	while (true) {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
		xdbc_handle_events();

		if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
			raw_spin_unlock_irqrestore(&xdbc.lock, flags);
			break;
		}

		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		schedule_timeout_interruptible(1);
	}

	xdbc_unregister_console();
	writel(0, &xdbc.xdbc_reg->control);
	xdbc_trace("dbc scrub function exits\n");

	return 0;
}

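/*
 * Late (subsys_initcall) stage: if the early console was never enabled, or
 * is only a boot console, shut the DbC down and free its memory so the
 * regular xHCI driver can reuse the debug port. Otherwise switch from the
 * early_ioremap() mapping to a permanent ioremap() and start a kthread that
 * keeps polling for events.
 */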
static int __init xdbc_init(void)
{
	unsigned long flags;
	void __iomem *base;
	int ret = 0;
	u32 offset;

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return 0;

	/*
	 * It's time to shut down the DbC, so that the debug
	 * port can be reused by the host controller:
	 */
	if (early_xdbc_console.index == -1 ||
	    (early_xdbc_console.flags & CON_BOOT)) {
		xdbc_trace("hardware not used anymore\n");
		goto free_and_quit;
	}

	base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
	if (!base) {
		xdbc_trace("failed to remap the io address\n");
		ret = -ENOMEM;
		goto free_and_quit;
	}

	raw_spin_lock_irqsave(&xdbc.lock, flags);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
	xdbc.xhci_base = base;
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

	return 0;

free_and_quit:
	xdbc_free_ring(&xdbc.evt_ring);
	xdbc_free_ring(&xdbc.out_ring);
	xdbc_free_ring(&xdbc.in_ring);
	memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
	memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
	writel(0, &xdbc.xdbc_reg->control);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

	return ret;
}
subsys_initcall(xdbc_init);