// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbc.c - xHCI debug capability early driver
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/usb/xhci-dbgp.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define xdbc_trace	trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */
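
/*
 * Probe and map the xHCI MMIO BAR (BAR0) of the controller at bus:dev.func
 * using early PCI config-space accessors: size the BAR by writing all-ones
 * and reading back the mask, handle 64-bit BARs, make sure memory space is
 * enabled, then map the region with early_ioremap().
 */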
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
        u64 val64, sz64, mask64;

        val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
        write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
        sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
        write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

        if (val == 0xffffffff || sz == 0xffffffff) {
                pr_notice("invalid mmio bar\n");

        val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
        sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
        mask64 = PCI_BASE_ADDRESS_MEM_MASK;

        if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
                val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
                write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
                sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
                write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

                val64 |= (u64)val << 32;
                sz64 |= (u64)sz << 32;
                mask64 |= ~0ULL << 32;

                pr_notice("invalid mmio address\n");

        sz64 = 1ULL << __ffs64(sz64);

        /* Check if the mem space is enabled: */
        byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
        if (!(byte & PCI_COMMAND_MEMORY)) {
                byte |= PCI_COMMAND_MEMORY;
                write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);

        xdbc.xhci_start = val64;
        xdbc.xhci_length = sz64;
        base = early_ioremap(val64, sz64);
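
/* Allocate one page from memblock and report its physical address for use as a DMA address. */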
static void * __init xdbc_get_page(dma_addr_t *dma_addr)
        virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

        *dma_addr = (dma_addr_t)__pa(virt);
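
/*
 * Walk the PCI buses with early config reads and return the location
 * (bus/device/function) of the xdbc_num'th xHCI-class host controller.
 */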
static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
        u32 bus, dev, func, class;

        for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
                for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
                        for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

                                class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
                                if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
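
/* Spin on a register until (value & mask) == done, or the wait budget expires. */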
static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
        return readl_poll_timeout_atomic(ptr, result,
                                         ((result & mask) == done),
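
/*
 * Request ownership of the xHCI controller from the BIOS via the USB legacy
 * support extended capability, then mask BIOS SMIs and clear pending SMI
 * events so firmware no longer interferes with the controller.
 */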
static void __init xdbc_bios_handoff(void)
        offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
        val = readl(xdbc.xhci_base + offset);

        if (val & XHCI_HC_BIOS_OWNED) {
                writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
                timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

                        pr_notice("failed to hand over xHCI control from BIOS\n");
                        writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);

        /* Disable BIOS SMIs and clear all SMI events: */
        val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
        val &= XHCI_LEGACY_DISABLE_SMI;
        val |= XHCI_LEGACY_SMI_EVENTS;
        writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
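
/* Single-segment ring helpers used for the DbC event and transfer rings. */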
xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
        seg->trbs = xdbc_get_page(&seg->dma);

static void __init xdbc_free_ring(struct xdbc_ring *ring)
        struct xdbc_segment *seg = ring->segment;

        memblock_free(seg->dma, PAGE_SIZE);
        ring->segment = NULL;
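
/*
 * Return a ring to its initial state: zero the segment, point enqueue and
 * dequeue at the first TRB and set the cycle state to 1. Transfer rings
 * additionally get a link TRB at the end of the segment with the toggle
 * cycle bit set, so the ring wraps back onto itself.
 */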
static void xdbc_reset_ring(struct xdbc_ring *ring)
        struct xdbc_segment *seg = ring->segment;
        struct xdbc_trb *link_trb;

        memset(seg->trbs, 0, PAGE_SIZE);

        ring->enqueue = seg->trbs;
        ring->dequeue = seg->trbs;
        ring->cycle_state = 1;

        if (ring != &xdbc.evt_ring) {
                link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
                link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
                link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
                link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);

static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
        for (i = 0; i < size; i++)
                s[i] = cpu_to_le16(c[i]);
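
/*
 * Carve the shared table page into the event ring segment table, the debug
 * capability contexts and the string descriptors, fill in the USB string
 * descriptors and the bulk IN/OUT endpoint contexts, and program the DbC
 * registers (ersts/erstba/erdp, dccp, devinfo1/devinfo2) accordingly.
 */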
static void xdbc_mem_init(void)
        struct xdbc_ep_context *ep_in, *ep_out;
        struct usb_string_descriptor *s_desc;
        struct xdbc_erst_entry *entry;
        struct xdbc_strings *strings;
        struct xdbc_context *ctx;
        unsigned int max_burst;

        xdbc_reset_ring(&xdbc.evt_ring);
        xdbc_reset_ring(&xdbc.in_ring);
        xdbc_reset_ring(&xdbc.out_ring);
        memset(xdbc.table_base, 0, PAGE_SIZE);
        memset(xdbc.out_buf, 0, PAGE_SIZE);

        /* Initialize event ring segment table: */
        xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
        xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

        index += XDBC_ERST_ENTRY_NUM;
        entry = (struct xdbc_erst_entry *)xdbc.erst_base;

        entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
        entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
        entry->__reserved_0 = 0;

        /* Initialize ERST registers: */
        writel(1, &xdbc.xdbc_reg->ersts);
        xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
        xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

        /* Debug capability contexts: */
        xdbc.dbcc_size = 64 * 3;
        xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
        xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

        index += XDBC_DBCC_ENTRY_NUM;

        /* Populate the strings: */
        xdbc.string_size = sizeof(struct xdbc_strings);
        xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
        xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
        strings = (struct xdbc_strings *)xdbc.string_base;

        index += XDBC_STRING_ENTRY_NUM;

        s_desc = (struct usb_string_descriptor *)strings->serial;
        s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;

        xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
        string_length = s_desc->bLength;

        /* Product string: */
        s_desc = (struct usb_string_descriptor *)strings->product;
        s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;

        xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
        string_length += s_desc->bLength;

        /* Manufacturer string: */
        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
        s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;

        xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
        string_length += s_desc->bLength;

        strings->string0[0] = 4;
        strings->string0[1] = USB_DT_STRING;
        strings->string0[2] = 0x09;
        strings->string0[3] = 0x04;

        /* Populate info Context: */
        ctx = (struct xdbc_context *)xdbc.dbcc_base;

        ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
        ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
        ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
        ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
        ctx->info.length = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
        ep_out = (struct xdbc_ep_context *)&ctx->out;

        ep_out->ep_info1 = 0;
        ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
        ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

        /* Populate bulk in endpoint context: */
        ep_in = (struct xdbc_ep_context *)&ctx->in;

        ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
        ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

        /* Set DbC context and info registers: */
        xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

        dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
        writel(dev_info, &xdbc.xdbc_reg->devinfo1);

        dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
        writel(dev_info, &xdbc.xdbc_reg->devinfo2);

        xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
        xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
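
/*
 * Reset the root hub ports in the range [id, id + count) that do not
 * currently report a connection. xdbc_reset_debug_port() below finds the
 * USB 3.x port range from the protocol extended capabilities; xdbc_start()
 * uses it as a workaround on Intel hosts to avoid a bus hang.
 */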
static void xdbc_do_reset_debug_port(u32 id, u32 count)
        void __iomem *ops_reg;
        void __iomem *portsc;

        cap_length = readl(xdbc.xhci_base) & 0xff;
        ops_reg = xdbc.xhci_base + cap_length;

        for (i = id; i < (id + count); i++) {
                portsc = ops_reg + 0x400 + i * 0x10;

                if (!(val & PORT_CONNECT))
                        writel(val | PORT_RESET, portsc);

static void xdbc_reset_debug_port(void)
        u32 val, port_offset, port_count;

        offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);

        val = readl(xdbc.xhci_base + offset);
        if (XHCI_EXT_PORT_MAJOR(val) != 0x3)

        val = readl(xdbc.xhci_base + offset + 8);
        port_offset = XHCI_EXT_PORT_OFF(val);
        port_count = XHCI_EXT_PORT_COUNT(val);

        xdbc_do_reset_debug_port(port_offset, port_count);
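
/*
 * Write one TRB at the ring's enqueue pointer. When the enqueue pointer
 * reaches the link TRB at the end of the segment, hand the link TRB to the
 * controller by flipping its cycle bit, wrap back to the start of the
 * segment and toggle the ring's cycle state.
 */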
xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
        struct xdbc_trb *trb, *link_trb;

        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
        trb->field[3] = cpu_to_le32(field4);

        if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
                link_trb = ring->enqueue;
                if (ring->cycle_state)
                        link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
                else
                        link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

                ring->enqueue = ring->segment->trbs;
                ring->cycle_state ^= 1;

static void xdbc_ring_doorbell(int target)
        writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
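
/*
 * Enable the DbC and wait for the debug host: run the enable handshake,
 * wait for a port connection and for the debug device to be configured,
 * then record the root hub port number the DbC is attached to.
 */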
static int xdbc_start(void)
        ctrl = readl(&xdbc.xdbc_reg->control);
        writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);

                xdbc_trace("failed to initialize hardware\n");

        /* Reset port to avoid bus hang: */
        if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
                xdbc_reset_debug_port();

        /* Wait for port connection: */
        ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);

                xdbc_trace("waiting for connection timed out\n");

        /* Wait for debug device to be configured: */
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);

                xdbc_trace("waiting for device configuration timed out\n");

        /* Check port number: */
        status = readl(&xdbc.xdbc_reg->status);
        if (!DCST_DEBUG_PORT(status)) {
                xdbc_trace("invalid root hub port number\n");

        xdbc.port_number = DCST_DEBUG_PORT(status);

        xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
                   readl(&xdbc.xdbc_reg->control), xdbc.port_number);
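
/*
 * Queue a single bulk transfer of at most XDBC_MAX_PACKET bytes on the IN
 * or OUT transfer ring. Data is staged in the dedicated in_buf/out_buf
 * bounce buffers, the TRB cycle bit is written last (behind a barrier), and
 * the corresponding endpoint doorbell is rung.
 */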
static int xdbc_bulk_transfer(void *data, int size, bool read)
        struct xdbc_ring *ring;
        struct xdbc_trb *trb;

        if (size > XDBC_MAX_PACKET) {
                xdbc_trace("bad parameter, size %d\n", size);

        if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
            !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
            (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
            (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

                xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);

        ring = (read ? &xdbc.in_ring : &xdbc.out_ring);

        cycle = ring->cycle_state;
        length = TRB_LEN(size);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        if (cycle)
                control &= cpu_to_le32(~TRB_CYCLE);
        else
                control |= cpu_to_le32(TRB_CYCLE);

        if (read) {
                memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);

                xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
        } else {
                memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
                memcpy(xdbc.out_buf, data, size);

                xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
        }

        xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * the cycle bit:
         */

        if (cycle)
                trb->field[3] |= cpu_to_le32(cycle);
        else
                trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

        xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);
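
/*
 * Recover from an external reset: if the controller dropped CTRL_DBC_ENABLE
 * behind our back, disable it cleanly, bring the DbC back up and re-queue
 * the pending bulk-in request.
 */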
static int xdbc_handle_external_reset(void)
        writel(0, &xdbc.xdbc_reg->control);
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);

        xdbc_trace("dbc recovered\n");

        xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

        xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

        xdbc_trace("failed to recover from external reset\n");

static int __init xdbc_early_setup(void)
        writel(0, &xdbc.xdbc_reg->control);
        ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);

        /* Allocate the table page: */
        xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
        if (!xdbc.table_base)

        /* Get and store the transfer buffer: */
        xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);

        /* Allocate the event ring: */
        ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);

        /* Allocate IN/OUT endpoint transfer rings: */
        ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);

        ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);

        writel(0, &xdbc.xdbc_reg->control);

        xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

        xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
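
/*
 * Parse the early parameter string: an optional controller index selects
 * which xHCI host to use, and "keep" retains the early console after boot.
 * Then locate the controller, map its MMIO space and find the debug
 * capability registers.
 */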
int __init early_xdbc_parse_parameter(char *s)
        unsigned long dbgp_num = 0;
        u32 bus, dev, func, offset;

        if (!early_pci_allowed())

        if (strstr(s, "keep"))
                early_console_keep = true;

        if (*s && kstrtoul(s, 0, &dbgp_num))

        pr_notice("dbgp_num: %lu\n", dbgp_num);

        /* Locate the host controller: */
        ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);

                pr_notice("failed to locate xhci host\n");

        xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
        xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);

        /* Map the IO memory: */
        xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);

        /* Locate DbC registers: */
        offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);

                pr_notice("xhci host doesn't support debug capability\n");
                early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
                xdbc.xhci_base = NULL;
                xdbc.xhci_length = 0;

        xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

int __init early_xdbc_setup_hardware(void)
        raw_spin_lock_init(&xdbc.lock);

        ret = xdbc_early_setup();

                pr_notice("failed to setup the connection to host\n");

                xdbc_free_ring(&xdbc.evt_ring);
                xdbc_free_ring(&xdbc.out_ring);
                xdbc_free_ring(&xdbc.in_ring);

                memblock_free(xdbc.table_dma, PAGE_SIZE);

                memblock_free(xdbc.out_dma, PAGE_SIZE);

                xdbc.table_base = NULL;

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
        port_reg = readl(&xdbc.xdbc_reg->portsc);
        if (port_reg & PORTSC_CONN_CHANGE) {
                xdbc_trace("connect status change event\n");

                /* Check whether cable unplugged: */
                if (!(port_reg & PORTSC_CONN_STATUS)) {
                        xdbc_trace("cable unplugged\n");

        if (port_reg & PORTSC_RESET_CHANGE)
                xdbc_trace("port reset change event\n");

        if (port_reg & PORTSC_LINK_CHANGE)
                xdbc_trace("port link status change event\n");

        if (port_reg & PORTSC_CONFIG_CHANGE)
                xdbc_trace("config error change\n");

        /* Write back the value to clear RW1C bits: */
        writel(port_reg, &xdbc.xdbc_reg->portsc);

static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
        comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
        ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

        case COMP_SHORT_PACKET:

        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:

                if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
                        xdbc.flags |= XDBC_FLAGS_OUT_STALL;
                if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
                        xdbc.flags |= XDBC_FLAGS_IN_STALL;

                xdbc_trace("endpoint %d stalled\n", ep_id);

        if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
                xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
                xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
        } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
                xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
        } else {
                xdbc_trace("invalid endpoint id %d\n", ep_id);

static void xdbc_handle_events(void)
        struct xdbc_trb *evt_trb;
        bool update_erdp = false;

        cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
        if (!(cmd & PCI_COMMAND_MASTER)) {
                cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
                write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);

        if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))

        /* Handle external reset events: */
        reg = readl(&xdbc.xdbc_reg->control);
        if (!(reg & CTRL_DBC_ENABLE)) {
                if (xdbc_handle_external_reset()) {
                        xdbc_trace("failed to recover connection\n");

        /* Handle configure-exit event: */
        reg = readl(&xdbc.xdbc_reg->control);
        if (reg & CTRL_DBC_RUN_CHANGE) {
                writel(reg, &xdbc.xdbc_reg->control);
                if (reg & CTRL_DBC_RUN)
                        xdbc.flags |= XDBC_FLAGS_CONFIGURED;
                else
                        xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;

        /* Handle endpoint stall event: */
        reg = readl(&xdbc.xdbc_reg->control);
        if (reg & CTRL_HALT_IN_TR) {
                xdbc.flags |= XDBC_FLAGS_IN_STALL;
        } else {
                xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
                if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
                        xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
        }

        if (reg & CTRL_HALT_OUT_TR)
                xdbc.flags |= XDBC_FLAGS_OUT_STALL;
        else
                xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

        /* Handle the events in the event ring: */
        evt_trb = xdbc.evt_ring.dequeue;
        while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */

                switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        xdbc_handle_port_status(evt_trb);

                case TRB_TYPE(TRB_TRANSFER):
                        xdbc_handle_tx_event(evt_trb);

                ++(xdbc.evt_ring.dequeue);
                if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
                        xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
                        xdbc.evt_ring.cycle_state ^= 1;

                evt_trb = xdbc.evt_ring.dequeue;

        /* Update event ring dequeue pointer: */
        xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
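
/*
 * Synchronously write one buffer to the debug host: take the DbC lock,
 * poll until any previous OUT transfer has completed (bounded by a
 * timeout), then queue the new one.
 */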
static int xdbc_bulk_write(const char *bytes, int size)
        int ret, timeout = 0;

        if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))

        raw_spin_lock_irqsave(&xdbc.lock, flags);

        xdbc_handle_events();

        /* Check completion of the previous request: */
        if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
                raw_spin_unlock_irqrestore(&xdbc.lock, flags);

        if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
                raw_spin_unlock_irqrestore(&xdbc.lock, flags);
                xdbc_trace("previous transfer not completed yet\n");

        ret = xdbc_bulk_transfer((void *)bytes, size, false);
        raw_spin_unlock_irqrestore(&xdbc.lock, flags);
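
/*
 * Console write callback: copy the message into a static packet buffer in
 * XDBC_MAX_PACKET-sized chunks, handling carriage-return insertion for
 * newlines, and push each chunk out with xdbc_bulk_write().
 */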
static void early_xdbc_write(struct console *con, const char *str, u32 n)
        static char buf[XDBC_MAX_PACKET];

        memset(buf, 0, XDBC_MAX_PACKET);

        for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {

                if (!use_cr && *str == '\n') {

        ret = xdbc_bulk_write(buf, chunk);

                xdbc_trace("missed message {%s}\n", buf);

static struct console early_xdbc_console = {
        .write =	early_xdbc_write,
        .flags =	CON_PRINTBUFFER,

void __init early_xdbc_register_console(void)
        early_console = &early_xdbc_console;
        if (early_console_keep)
                early_console->flags &= ~CON_BOOT;
        else
                early_console->flags |= CON_BOOT;
        register_console(early_console);

static void xdbc_unregister_console(void)
        if (early_xdbc_console.flags & CON_ENABLED)
                unregister_console(&early_xdbc_console);
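
/*
 * Kernel thread that keeps servicing DbC events (transfer completions,
 * port and configuration changes) after boot, and unregisters the console
 * and disables the DbC once it is no longer marked initialized.
 */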
static int xdbc_scrub_function(void *ptr)
        raw_spin_lock_irqsave(&xdbc.lock, flags);
        xdbc_handle_events();

        if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
                raw_spin_unlock_irqrestore(&xdbc.lock, flags);

        raw_spin_unlock_irqrestore(&xdbc.lock, flags);
        schedule_timeout_interruptible(1);

        xdbc_unregister_console();
        writel(0, &xdbc.xdbc_reg->control);
        xdbc_trace("dbc scrub function exits\n");
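
/*
 * Late init: once the kernel is up, either shut the DbC down so the xHCI
 * driver can reclaim the debug port, or, when the console is still in use,
 * switch from the early fixmap mapping to a regular ioremap() mapping and
 * start the scrub thread that keeps the console alive.
 */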
static int __init xdbc_init(void)
        if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))

        /*
         * It's time to shut down the DbC, so that the debug
         * port can be reused by the host controller:
         */
        if (early_xdbc_console.index == -1 ||
            (early_xdbc_console.flags & CON_BOOT)) {
                xdbc_trace("hardware not used anymore\n");

        base = ioremap(xdbc.xhci_start, xdbc.xhci_length);

                xdbc_trace("failed to remap the io address\n");

        raw_spin_lock_irqsave(&xdbc.lock, flags);
        early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
        xdbc.xhci_base = base;
        offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
        xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
        raw_spin_unlock_irqrestore(&xdbc.lock, flags);

        kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

        xdbc_free_ring(&xdbc.evt_ring);
        xdbc_free_ring(&xdbc.out_ring);
        xdbc_free_ring(&xdbc.in_ring);
        memblock_free(xdbc.table_dma, PAGE_SIZE);
        memblock_free(xdbc.out_dma, PAGE_SIZE);
        writel(0, &xdbc.xdbc_reg->control);
        early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

subsys_initcall(xdbc_init);