// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbc.c - xHCI debug capability early driver
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/bootmem.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define	xdbc_trace	trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */
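
/*
 * Map the xHCI register MMIO space with early_ioremap(). BAR0 is sized the
 * standard way: write all ones, read the size mask back, then restore the
 * original value; 64-bit memory BARs pull in the upper dword as well.
 */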
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}
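
/*
 * Grab one page from the boot allocator; at this point in boot the DbC is
 * programmed with physical addresses directly, so the page's physical
 * address is reported as its DMA address.
 */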
static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
	void *virt;

	virt = alloc_bootmem_pages_nopanic(PAGE_SIZE);
	if (!virt)
		return NULL;

	if (dma_addr)
		*dma_addr = (dma_addr_t)__pa(virt);

	return virt;
}
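
/* Scan PCI configuration space for the xdbc_num'th xHCI-class host controller. */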
static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
	u32 bus, dev, func, class;

	for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
		for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
			for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

				class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
				if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
					continue;

				if (xdbc_num-- != 0)
					continue;

				*b = bus;
				*d = dev;
				*f = func;

				return 0;
			}
		}
	}

	return -1;
}
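
/*
 * Poll a register until (value & mask) == done, or the wait budget
 * (in microseconds) runs out.
 */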
static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
	u32 result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay);
		wait -= delay;
	} while (wait > 0);

	return -ETIMEDOUT;
}
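
/*
 * Take ownership of the controller from the BIOS through the USB legacy
 * support capability, then mask BIOS SMIs so firmware no longer interferes
 * with the controller.
 */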
static void __init xdbc_bios_handoff(void)
{
	int offset, timeout;
	u32 val;

	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
	val = readl(xdbc.xhci_base + offset);

	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
		timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

		if (timeout) {
			pr_notice("failed to hand over xHCI control from BIOS\n");
			writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
		}
	}

	/* Disable BIOS SMIs and clear all SMI events: */
	val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
	val &= XHCI_LEGACY_DISABLE_SMI;
	val |= XHCI_LEGACY_SMI_EVENTS;
	writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}
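
/* A ring is a single segment here: one page of TRBs from the boot allocator. */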
static int __init xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
	seg->trbs = xdbc_get_page(&seg->dma);
	if (!seg->trbs)
		return -ENOMEM;

	ring->segment = seg;

	return 0;
}

static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;

	if (!seg)
		return;

	free_bootmem(seg->dma, PAGE_SIZE);
	ring->segment = NULL;
}

static void xdbc_reset_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;
	struct xdbc_trb *link_trb;

	memset(seg->trbs, 0, PAGE_SIZE);

	ring->enqueue = seg->trbs;
	ring->dequeue = seg->trbs;
	ring->cycle_state = 1;

	if (ring != &xdbc.evt_ring) {
		link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
		link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
		link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
		link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
	}
}

static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		s[i] = cpu_to_le16(c[i]);
}
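
/*
 * Lay out the event ring segment table, the DbC contexts and the string
 * descriptors in the shared table page, then program the DbC registers
 * (ERST, context pointer, device descriptor info) accordingly.
 */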
static void xdbc_mem_init(void)
{
	struct xdbc_ep_context *ep_in, *ep_out;
	struct usb_string_descriptor *s_desc;
	struct xdbc_erst_entry *entry;
	struct xdbc_strings *strings;
	struct xdbc_context *ctx;
	unsigned int max_burst;
	u32 string_length;
	int index = 0;
	u32 dev_info;

	xdbc_reset_ring(&xdbc.evt_ring);
	xdbc_reset_ring(&xdbc.in_ring);
	xdbc_reset_ring(&xdbc.out_ring);
	memset(xdbc.table_base, 0, PAGE_SIZE);
	memset(xdbc.out_buf, 0, PAGE_SIZE);

	/* Initialize event ring segment table: */
	xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_ERST_ENTRY_NUM;
	entry = (struct xdbc_erst_entry *)xdbc.erst_base;

	entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
	entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
	entry->__reserved_0 = 0;

	/* Initialize ERST registers: */
	writel(1, &xdbc.xdbc_reg->ersts);
	xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
	xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

	/* Debug capability contexts: */
	xdbc.dbcc_size = 64 * 3;
	xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_DBCC_ENTRY_NUM;

	/* Populate the strings: */
	xdbc.string_size = sizeof(struct xdbc_strings);
	xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
	strings = (struct xdbc_strings *)xdbc.string_base;

	index += XDBC_STRING_ENTRY_NUM;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;

	string_length += 4;

	/* Populate info Context: */
	ctx = (struct xdbc_context *)xdbc.dbcc_base;

	ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
	ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
	ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
	ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
	ctx->info.length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
	ep_out = (struct xdbc_ep_context *)&ctx->out;

	ep_out->ep_info1 = 0;
	ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

	/* Populate bulk in endpoint context: */
	ep_in = (struct xdbc_ep_context *)&ctx->in;

	ep_in->ep_info1 = 0;
	ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

	/* Set DbC context and info registers: */
	xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

	dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
	writel(dev_info, &xdbc.xdbc_reg->devinfo1);

	dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
	writel(dev_info, &xdbc.xdbc_reg->devinfo2);

	xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
	xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}
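
/* Reset any root hub port in the given range that has nothing connected. */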
static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
	void __iomem *ops_reg;
	void __iomem *portsc;
	u32 val, cap_length;
	int i;

	cap_length = readl(xdbc.xhci_base) & 0xff;
	ops_reg = xdbc.xhci_base + cap_length;

	id--;
	for (i = id; i < (id + count); i++) {
		portsc = ops_reg + 0x400 + i * 0x10;
		val = readl(portsc);
		if (!(val & PORT_CONNECT))
			writel(val | PORT_RESET, portsc);
	}
}

static void xdbc_reset_debug_port(void)
{
	u32 val, port_offset, port_count;
	int offset = 0;

	do {
		offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(xdbc.xhci_base + offset);
		if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
			continue;

		val = readl(xdbc.xhci_base + offset + 8);
		port_offset = XHCI_EXT_PORT_OFF(val);
		port_count = XHCI_EXT_PORT_COUNT(val);

		xdbc_do_reset_debug_port(port_offset, port_count);
	} while (1);
}
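
/*
 * Write one TRB at the enqueue pointer and advance it. When the enqueue
 * pointer reaches the link TRB, hand the link TRB to the hardware (cycle
 * bit), wrap back to the start of the segment and toggle the cycle state.
 */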
static void xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xdbc_trb *trb, *link_trb;

	trb = ring->enqueue;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	++(ring->enqueue);
	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
		link_trb = ring->enqueue;
		if (ring->cycle_state)
			link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
		else
			link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

		ring->enqueue = ring->segment->trbs;
		ring->cycle_state ^= 1;
	}
}

static void xdbc_ring_doorbell(int target)
{
	writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}
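
/*
 * Enable the DbC and wait, in order, for the controller to report it
 * enabled, for a debug host to connect, and for the debug device to be
 * configured, then record the root hub port the DbC ended up on.
 */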
static int xdbc_start(void)
{
	u32 ctrl, status;
	int ret;

	ctrl = readl(&xdbc.xdbc_reg->control);
	writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
	if (ret) {
		xdbc_trace("failed to initialize hardware\n");
		return ret;
	}

	/* Reset port to avoid bus hang: */
	if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
		xdbc_reset_debug_port();

	/* Wait for port connection: */
	ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for connection timed out\n");
		return ret;
	}

	/* Wait for debug device to be configured: */
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for device configuration timed out\n");
		return ret;
	}

	/* Check port number: */
	status = readl(&xdbc.xdbc_reg->status);
	if (!DCST_DEBUG_PORT(status)) {
		xdbc_trace("invalid root hub port number\n");
		return -ENODEV;
	}

	xdbc.port_number = DCST_DEBUG_PORT(status);

	xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
		   readl(&xdbc.xdbc_reg->control), xdbc.port_number);

	return 0;
}
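
/*
 * Queue a single bulk transfer on the IN or OUT ring. All other TRB fields
 * are written before the cycle bit is flipped, so the hardware never sees a
 * half-initialized TRB.
 */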
static int xdbc_bulk_transfer(void *data, int size, bool read)
{
	struct xdbc_ring *ring;
	struct xdbc_trb *trb;
	u32 length, control;
	u32 cycle;
	u64 addr;

	if (size > XDBC_MAX_PACKET) {
		xdbc_trace("bad parameter, size %d\n", size);
		return -EINVAL;
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
	    !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
	    (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
	    (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

		xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
		return -EIO;
	}

	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(size);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	if (read) {
		memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
		addr = xdbc.in_dma;
		xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
	} else {
		memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
		memcpy(xdbc.out_buf, data, size);
		addr = xdbc.out_dma;
		xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
	}

	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();
	if (cycle)
		trb->field[3] |= cpu_to_le32(cycle);
	else
		trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);

	return size;
}
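
/*
 * The DbC can be disabled behind our back by an external reset of the host
 * controller; re-initialize the contexts and rings and bring it back up.
 */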
static int xdbc_handle_external_reset(void)
{
	int ret = 0;

	xdbc.flags = 0;
	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
	if (ret)
		goto reset_out;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0)
		goto reset_out;

	xdbc_trace("dbc recovered\n");

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;

reset_out:
	xdbc_trace("failed to recover from external reset\n");
	return ret;
}

static int __init xdbc_early_setup(void)
{
	int ret;

	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
	if (ret)
		return ret;

	/* Allocate the table page: */
	xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
	if (!xdbc.table_base)
		return -ENOMEM;

	/* Get and store the transfer buffer: */
	xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
	if (!xdbc.out_buf)
		return -ENOMEM;

	/* Allocate the event ring: */
	ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
	if (ret < 0)
		return ret;

	/* Allocate IN/OUT endpoint transfer rings: */
	ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
	if (ret < 0)
		return ret;

	ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
	if (ret < 0)
		return ret;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0) {
		writel(0, &xdbc.xdbc_reg->control);
		return ret;
	}

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;
}
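
/*
 * Parse the early parameter string: an optional xHCI controller index plus
 * the "keep" flag. The selected controller is located with raw PCI config
 * space accesses, its MMIO space is mapped and the debug capability
 * registers are located.
 */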
int __init early_xdbc_parse_parameter(char *s)
{
	unsigned long dbgp_num = 0;
	u32 bus, dev, func, offset;
	int ret;

	if (!early_pci_allowed())
		return -EPERM;

	if (strstr(s, "keep"))
		early_console_keep = true;

	if (xdbc.xdbc_reg)
		return 0;

	if (*s && kstrtoul(s, 0, &dbgp_num))
		dbgp_num = 0;

	pr_notice("dbgp_num: %lu\n", dbgp_num);

	/* Locate the host controller: */
	ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
	if (ret) {
		pr_notice("failed to locate xhci host\n");
		return -ENODEV;
	}

	xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
	xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
	xdbc.bus = bus;
	xdbc.dev = dev;
	xdbc.func = func;

	/* Map the IO memory: */
	xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
	if (!xdbc.xhci_base)
		return -ENOMEM;

	/* Locate DbC registers: */
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!offset) {
		pr_notice("xhci host doesn't support debug capability\n");
		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
		xdbc.xhci_base = NULL;
		xdbc.xhci_length = 0;

		return -ENODEV;
	}
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

	return 0;
}
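
/*
 * Set up the DbC hardware early in boot: take the controller from the BIOS,
 * allocate rings and buffers, and start the debug engine. On failure
 * everything is handed back to the boot allocator.
 */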
int __init early_xdbc_setup_hardware(void)
{
	int ret;

	if (!xdbc.xdbc_reg)
		return -ENODEV;

	xdbc_bios_handoff();

	raw_spin_lock_init(&xdbc.lock);

	ret = xdbc_early_setup();
	if (ret) {
		pr_notice("failed to setup the connection to host\n");

		xdbc_free_ring(&xdbc.evt_ring);
		xdbc_free_ring(&xdbc.out_ring);
		xdbc_free_ring(&xdbc.in_ring);

		if (xdbc.table_dma)
			free_bootmem(xdbc.table_dma, PAGE_SIZE);

		if (xdbc.out_dma)
			free_bootmem(xdbc.out_dma, PAGE_SIZE);

		xdbc.table_base = NULL;
		xdbc.out_buf = NULL;
	}

	return ret;
}

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
	u32 port_reg;

	port_reg = readl(&xdbc.xdbc_reg->portsc);
	if (port_reg & PORTSC_CONN_CHANGE) {
		xdbc_trace("connect status change event\n");

		/* Check whether cable unplugged: */
		if (!(port_reg & PORTSC_CONN_STATUS)) {
			xdbc.flags = 0;
			xdbc_trace("cable unplugged\n");
		}
	}

	if (port_reg & PORTSC_RESET_CHANGE)
		xdbc_trace("port reset change event\n");

	if (port_reg & PORTSC_LINK_CHANGE)
		xdbc_trace("port link status change event\n");

	if (port_reg & PORTSC_CONFIG_CHANGE)
		xdbc_trace("config error change\n");

	/* Write back the value to clear RW1C bits: */
	writel(port_reg, &xdbc.xdbc_reg->portsc);
}

static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
	size_t remain_length;
	u32 comp_code;
	int ep_id;

	comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(evt_trb->field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

	switch (comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
	default:
		if (ep_id == XDBC_EPID_OUT)
			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
		if (ep_id == XDBC_EPID_IN)
			xdbc.flags |= XDBC_FLAGS_IN_STALL;

		xdbc_trace("endpoint %d stalled\n", ep_id);
		break;
	}

	if (ep_id == XDBC_EPID_IN) {
		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	} else if (ep_id == XDBC_EPID_OUT) {
		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
	} else {
		xdbc_trace("invalid endpoint id %d\n", ep_id);
	}
}
static void xdbc_handle_events(void)
{
	struct xdbc_trb *evt_trb;
	bool update_erdp = false;
	u32 reg;
	u8 cmd;

	cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
		write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return;

	/* Handle external reset events: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (!(reg & CTRL_DBC_ENABLE)) {
		if (xdbc_handle_external_reset()) {
			xdbc_trace("failed to recover connection\n");
			return;
		}
	}

	/* Handle configure-exit event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_DBC_RUN_CHANGE) {
		writel(reg, &xdbc.xdbc_reg->control);
		if (reg & CTRL_DBC_RUN)
			xdbc.flags |= XDBC_FLAGS_CONFIGURED;
		else
			xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
	}

	/* Handle endpoint stall event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_HALT_IN_TR) {
		xdbc.flags |= XDBC_FLAGS_IN_STALL;
	} else {
		xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
		if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
			xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	}

	if (reg & CTRL_HALT_OUT_TR)
		xdbc.flags |= XDBC_FLAGS_OUT_STALL;
	else
		xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

	/* Handle the events in the event ring: */
	evt_trb = xdbc.evt_ring.dequeue;
	while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
		case TRB_TYPE(TRB_PORT_STATUS):
			xdbc_handle_port_status(evt_trb);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			xdbc_handle_tx_event(evt_trb);
			break;
		default:
			break;
		}

		++(xdbc.evt_ring.dequeue);
		if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
			xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
			xdbc.evt_ring.cycle_state ^= 1;
		}

		evt_trb = xdbc.evt_ring.dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp)
		xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}
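
/*
 * Send one chunk to the OUT ring under xdbc.lock, busy-waiting up to ~2s for
 * the previous OUT transfer to complete first. If the lock cannot be taken
 * (NMI context) the message is dropped instead of risking a deadlock.
 */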
static int xdbc_bulk_write(const char *bytes, int size)
{
	int ret, timeout = 0;
	unsigned long flags;

retry:
	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
			return -EAGAIN;
	} else {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
	}

	xdbc_handle_events();

	/* Check completion of the previous request: */
	if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		udelay(100);
		timeout += 100;
		goto retry;
	}

	if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		xdbc_trace("previous transfer not completed yet\n");

		return -ETIMEDOUT;
	}

	ret = xdbc_bulk_transfer((void *)bytes, size, false);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	return ret;
}

static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
	static char buf[XDBC_MAX_PACKET];
	int chunk, ret;
	int use_cr = 0;

	if (!xdbc.xdbc_reg)
		return;
	memset(buf, 0, XDBC_MAX_PACKET);
	while (n > 0) {
		for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {

			if (!use_cr && *str == '\n') {
				use_cr = 1;
				buf[chunk] = '\r';
				str--;
				n++;
				continue;
			}

			if (use_cr)
				use_cr = 0;
			buf[chunk] = *str;
		}

		if (chunk > 0) {
			ret = xdbc_bulk_write(buf, chunk);
			if (ret < 0)
				xdbc_trace("missed message {%s}\n", buf);
		}
	}
}

static struct console early_xdbc_console = {
	.write		= early_xdbc_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

void __init early_xdbc_register_console(void)
{
	if (early_console)
		return;

	early_console = &early_xdbc_console;
	if (early_console_keep)
		early_console->flags &= ~CON_BOOT;
	else
		early_console->flags |= CON_BOOT;
	register_console(early_console);
}

static void xdbc_unregister_console(void)
{
	if (early_xdbc_console.flags & CON_ENABLED)
		unregister_console(&early_xdbc_console);
}

static int xdbc_scrub_function(void *ptr)
{
	unsigned long flags;

	while (true) {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
		xdbc_handle_events();

		if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
			raw_spin_unlock_irqrestore(&xdbc.lock, flags);
			break;
		}

		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		schedule_timeout_interruptible(1);
	}

	xdbc_unregister_console();
	writel(0, &xdbc.xdbc_reg->control);
	xdbc_trace("dbc scrub function exits\n");

	return 0;
}
static int __init xdbc_init(void)
{
	unsigned long flags;
	void __iomem *base;
	int ret = 0;
	u32 offset;

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return 0;

	/*
	 * It's time to shut down the DbC, so that the debug
	 * port can be reused by the host controller:
	 */
	if (early_xdbc_console.index == -1 ||
	    (early_xdbc_console.flags & CON_BOOT)) {
		xdbc_trace("hardware not used anymore\n");
		goto free_and_quit;
	}

	base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
	if (!base) {
		xdbc_trace("failed to remap the io address\n");
		ret = -ENOMEM;
		goto free_and_quit;
	}

	raw_spin_lock_irqsave(&xdbc.lock, flags);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
	xdbc.xhci_base = base;
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

	return 0;

free_and_quit:
	xdbc_free_ring(&xdbc.evt_ring);
	xdbc_free_ring(&xdbc.out_ring);
	xdbc_free_ring(&xdbc.in_ring);
	free_bootmem(xdbc.table_dma, PAGE_SIZE);
	free_bootmem(xdbc.out_dma, PAGE_SIZE);
	writel(0, &xdbc.xdbc_reg->control);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

	return ret;
}
subsys_initcall(xdbc_init);