// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kstrtox.h>
#include <linux/list.h>
#include <linux/nls.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <linux/io-64-nonatomic-lo-hi.h>

#include <asm/byteorder.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}
/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}
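
/*
 * Fill the string descriptor table (string0, manufacturer, product and
 * serial) with UTF-16LE descriptors, and pack the four descriptor lengths
 * into the u32 that xhci_dbc_init_contexts() writes into the info
 * context, one length per byte with the serial length in the top byte.
 */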
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0: LANGID 0x0409, English (United States) */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}
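
/*
 * Program the DbC info context, both bulk endpoint contexts and the
 * context pointer / device descriptor info registers from the allocated
 * rings and string table.
 */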
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}
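
/*
 * Complete a request: take it off the pending list, unmap its buffer and
 * call its completion callback with dbc->lock temporarily dropped.
 */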
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}
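
/* Turn a TRB into a No-Op, preserving only its cycle bit */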
static void trb_to_noop(union xhci_trb *trb)
{
	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
}
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	trb_to_noop(req->trb);
	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request *req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}
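
/*
 * Write one TRB at the enqueue pointer and advance it, wrapping past the
 * link TRB and toggling the cycle state at the end of the segment.
 */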
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic,
				       xhci_trb_virt_to_dma(ring->enq_seg,
							    ring->enqueue));
	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
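
/*
 * Queue a single Normal TRB for the request and ring the doorbell. The
 * TRB is first written with an inverted cycle bit and the real cycle bit
 * is flipped in after a write barrier, so the controller never sees a
 * half-written TRB.
 */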
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = le64_to_cpu(req->dma);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/* Fill the trb in the ring first: */
	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}
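
/* Map the request buffer and put it on the transfer ring; caller holds dbc->lock */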
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}
int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long flags;
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}
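
/* Set up the per-direction endpoint bookkeeping (ring and pending list) */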
static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}
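
/*
 * The DbC event ring is covered by a single-entry Event Ring Segment
 * Table pointing at its one segment.
 */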
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;

	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
			  erst->erst_dma_addr);
	erst->entries = NULL;
}
static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xHCI 7.6.9: all three contexts (info, ep-out and ep-in), 64 bytes each */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}
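
/*
 * Allocate all DbC data structures (event and transfer rings, ERST,
 * contexts, string table), program the event ring registers and move the
 * DbC to DS_INITIALIZED. All allocations are unwound on failure.
 */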
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}
static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
	dbc->string = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}
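
/* Enable the DbC in hardware; called with dbc->lock held */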
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	u32 ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}
static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work,
				msecs_to_jiffies(dbc->poll_interval));
}
static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}
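
/*
 * Track endpoint halt state from the DbC control register. When a halt
 * clears while transfers are still pending, ring the doorbell to restart
 * them.
 */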
static void
handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
{
	if (halted) {
		dev_info(dbc->dev, "DbC Endpoint halted\n");
		dep->halted = 1;

	} else if (dep->halted) {
		dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
		dep->halted = 0;

		if (!list_empty(&dep->list_pending))
			writel(DBC_DOOR_BELL_TARGET(dep->direction),
			       &dbc->regs->doorbell);
	}
}
static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	struct xhci_ep_ctx *ep_ctx;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ep_ctx = (ep_id == EPID_OUT) ?
			dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
	ring = dep->ring;

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
		if (r->status == -COMP_STALL_ERROR) {
			dev_warn(dbc->dev, "Give back stale stalled req\n");
			ring->num_trbs_free++;
			xhci_dbc_giveback(r, 0);
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic, req->trb_dma);

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
			 event->trans_event.buffer, remain_length, ep_ctx->deq);
		status = 0;
		dep->halted = 1;

		/*
		 * xHC DbC may trigger a STALL bulk xfer event when host sends a
		 * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
		 * active bulk transfer.
		 *
		 * Don't give back this transfer request as hardware will later
		 * start processing TRBs starting from this 'STALLED' TRB,
		 * causing TRBs and requests to be out of sync.
		 *
		 * If STALL event shows some bytes were transferred then assume
		 * it's an actual transfer issue and give back the request.
		 * In this case mark the TRB as No-Op to prevent hw from using
		 * the TRB again.
		 */
		if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
			dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
			if (remain_length == req->length) {
				dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
				req->status = -COMP_STALL_ERROR;
				req->actual = 0;
				return;
			}
			dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
			trb_to_noop(req->trb);
		}
		break;

	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}
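
/*
 * Poll-based event handling: advance the DbC state machine based on
 * register state, then drain the event ring and update the dequeue
 * pointer. Called with dbc->lock held; the return value tells the
 * caller which driver callback, if any, to invoke.
 */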
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Check and handle changes in endpoint halt status */
		ctrl = readl(&dbc->regs->control);
		handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
		handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}
		break;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic,
					    xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
								 dbc->ring_evt->dequeue));

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	unsigned int poll_interval;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	poll_interval = dbc->poll_interval;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		/* set fast poll rate if there are pending data transfers */
		if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
		    !list_empty(&dbc->eps[BULK_IN].list_pending))
			poll_interval = 0;
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work,
			 msecs_to_jiffies(poll_interval));
}
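
/* sysfs interface: DbC state and the device descriptor fields it advertises */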
static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
};
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
		return sysfs_emit(buf, "unknown\n");

	return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;
	struct xhci_dbc *dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (sysfs_streq(buf, "enable"))
		xhci_dbc_start(dbc);
	else if (sysfs_streq(buf, "disable"))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}
static ssize_t dbc_idVendor_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idVendor);
}

static ssize_t dbc_idVendor_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idVendor = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}
static ssize_t dbc_idProduct_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->idProduct);
}

static ssize_t dbc_idProduct_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->idProduct = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu)) | value;
	writel(dev_info, ptr);

	return size;
}
static ssize_t dbc_bcdDevice_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
}

static ssize_t dbc_bcdDevice_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u16 value;
	u32 dev_info;
	int ret;

	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bcdDevice = value;
	ptr = &dbc->regs->devinfo2;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
	writel(dev_info, ptr);

	return size;
}
static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
}

static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	void __iomem *ptr;
	u32 dev_info;
	u8 value;
	int ret;

	/* bInterfaceProtocol is 8 bit, but... */
	ret = kstrtou8(buf, 0, &value);
	if (ret)
		return ret;

	/* ...xhci only supports values 0 and 1 */
	if (value > 1)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	if (dbc->state != DS_DISABLED)
		return -EBUSY;

	dbc->bInterfaceProtocol = value;
	ptr = &dbc->regs->devinfo1;
	dev_info = readl(ptr);
	dev_info = (dev_info & ~(0xffu)) | value;
	writel(dev_info, ptr);

	return size;
}
static ssize_t dbc_poll_interval_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	return sysfs_emit(buf, "%u\n", dbc->poll_interval);
}

static ssize_t dbc_poll_interval_ms_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t size)
{
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;
	u32 value;
	int ret;

	ret = kstrtou32(buf, 0, &value);
	if (ret || value > DBC_POLL_INTERVAL_MAX)
		return -EINVAL;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	dbc->poll_interval = value;

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	return size;
}
static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
static DEVICE_ATTR_RW(dbc_poll_interval_ms);

static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	&dev_attr_dbc_poll_interval_ms.attr,
	NULL
};
ATTRIBUTE_GROUPS(dbc_dev);
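
/*
 * Allocate and initialize an xhci_dbc for the DbC extended capability at
 * 'base'; fails if the DbC is already enabled (e.g. by firmware).
 */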
static struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
{
	struct xhci_dbc *dbc;
	int ret;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return NULL;

	dbc->regs = base;
	dbc->dev = dev;
	dbc->driver = driver;
	dbc->idProduct = DBC_PRODUCT_ID;
	dbc->idVendor = DBC_VENDOR_ID;
	dbc->bcdDevice = DBC_DEVICE_REV;
	dbc->bInterfaceProtocol = DBC_PROTOCOL;
	dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;

	if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
		goto err;

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
	if (ret)
		goto err;

	return dbc;
err:
	kfree(dbc);
	return NULL;
}
/* undo what xhci_alloc_dbc() did */
void xhci_dbc_remove(struct xhci_dbc *dbc)
{
	WARN_ON(!dbc);
	/* stop hw, stop wq and call dbc->ops->stop() */
	xhci_dbc_stop(dbc);

	/* remove sysfs files */
	sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);

	kfree(dbc);
}
int xhci_create_dbc_dev(struct xhci_hcd *xhci)
{
	struct device *dev;
	void __iomem *base;
	int ret;
	int dbc_cap_offs;

	/* gather the parameters needed to set up a DbC device */
	dev = xhci_to_hcd(xhci)->self.controller;
	base = &xhci->cap_regs->hc_capbase;

	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	/* already allocated and in use */
	if (xhci->dbc)
		return -EBUSY;

	ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);

	return ret;
}
void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
{
	unsigned long flags;

	if (!xhci->dbc)
		return;

	xhci_dbc_tty_remove(xhci->dbc);
	spin_lock_irqsave(&xhci->lock, flags);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}
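
/*
 * Suspend stops the DbC; resume_required records whether it was up
 * (DS_CONFIGURED) so resume can restart it.
 */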
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */
int xhci_dbc_init(void)
{
	return dbc_tty_init();
}

void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}