// SPDX-License-Identifier: GPL-2.0
/**
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
static void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	void			*vaddr;

	vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
				   size, dma_handle, flags);
	/* Don't zero the buffer if the allocation failed: */
	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
static void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
		      void *cpu_addr, dma_addr_t dma_handle)
{
	if (cpu_addr)
		dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
				  size, cpu_addr, dma_handle);
}
static u32
xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0 descriptor (LANGID 0x0409, US English): */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}
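/*
 * A worked example of the packing above (the actual values depend on
 * the DBC_STRING_* definitions in xhci-dbgcap.h): a serial string of
 * "0001" yields a (4 + 1) * 2 = 10 byte descriptor. Each
 * "string_length <<= 8" moves the accumulated lengths up one byte, so
 * the returned word carries one descriptor length per byte: serial in
 * the highest byte, then product, then manufacturer, with the 4-byte
 * string0 (LANGID) descriptor in the lowest byte. This packed word is
 * what xhci_dbc_init_contexts() writes into the info context length
 * field.
 */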
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() already performs the endian conversion; don't double-swap: */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}
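/*
 * Complete a request back to its owner. dbc->lock is dropped around
 * the ->complete() callback (hence the __releases/__acquires sparse
 * annotations below), so the callback may requeue requests without
 * deadlocking on the lock.
 */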
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep		*dep = req->dep;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_hcd		*xhci = dbc->xhci;
	struct device		*dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(xhci, req);
	spin_lock(&dbc->lock);
}
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	/* Turn the pending TRB into a no-op so the controller skips it: */
	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}
static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request	*req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
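/*
 * Queue one bulk transfer TRB and ring the doorbell. The TRB is first
 * written with its cycle bit in the state the controller treats as
 * "not yet owned"; only after a write barrier is the bit flipped, so
 * the DbC can never pick up a half-written TRB.
 */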
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/* Queue the TRB with the cycle bit inverted; it is flipped below: */
	if (cycle)
		control &= ~TRB_CYCLE;
	else
		control |= TRB_CYCLE;

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * of the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}
static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int			ret;
	struct device		*dev;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_hcd		*xhci = dbc->xhci;

	dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		xhci_err(xhci, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		xhci_err(xhci, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}
int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = dep->dbc;
	int			ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}
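/*
 * A sketch of the expected caller flow (the in-tree consumer is the
 * DbC tty glue; the completion callback name below is hypothetical):
 *
 *	req = dbc_alloc_request(dep, GFP_ATOMIC);
 *	req->buf      = buffer;		// mapped for DMA by dbc_ep_do_queue()
 *	req->length   = len;
 *	req->complete = my_complete;	// invoked with dbc->lock dropped
 *	ret = dbc_ep_queue(dep, req, GFP_ATOMIC);
 */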
static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
	struct dbc_ep		*dep;
	struct xhci_dbc		*dbc = xhci->dbc;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
	xhci_dbc_do_eps_init(xhci, BULK_OUT);
	xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct xhci_dbc		*dbc = xhci->dbc;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dbc_dma_alloc_coherent(xhci,
					     dbc->string_size,
					     &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);
	xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	xhci_write_64(xhci, deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(xhci, string_length);

	xhci_dbc_eps_init(xhci);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	xhci_free_erst(xhci, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}
static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(xhci);

	if (dbc->string) {
		dbc_dma_free_coherent(xhci,
				      dbc->string_size,
				      dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;

	xhci_free_erst(xhci, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	u32			ctrl;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	xhci_dbc_mem_cleanup(xhci);
	dbc->state = DS_DISABLED;

	return 0;
}
static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}
static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(xhci);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret)
		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}
static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
	u32			portsc;
	struct xhci_dbc		*dbc = xhci->dbc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		xhci_info(xhci, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		xhci_info(xhci, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		xhci_info(xhci, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		xhci_info(xhci, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(xhci) : get_in_ep(xhci);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
	/* FALLTHROUGH */
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		xhci_warn(xhci, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		xhci_err(xhci, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		xhci_warn(xhci, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
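/*
 * The DbC has no interrupt handler in this driver: the state machine
 * below is polled from delayed work, which inspects the current state
 * and then drains any pending TRBs from the event ring.
 */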
static enum evtreturn
xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	struct xhci_hcd		*xhci = dbc->xhci;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			xhci_info(xhci, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			xhci_info(xhci, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			xhci_info(xhci, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			xhci_info(xhci, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			xhci_info(xhci, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(xhci, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(xhci, evt);
			break;
		default:
			break;
		}

		inc_deq(xhci, dbc->ring_evt);
		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		xhci_write_64(xhci, deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}
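/*
 * Event work handler: runs the state machine above under dbc->lock,
 * then acts on the verdict outside the lock, registering the tty
 * device on EVT_GSER, unregistering it on EVT_DISC, and rearming
 * itself unless event handling is being stopped.
 */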
static void xhci_dbc_handle_events(struct work_struct *work)
{
	int			ret;
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	struct xhci_hcd		*xhci;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	xhci = dbc->xhci;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(xhci);
		if (ret) {
			xhci_err(xhci, "failed to alloc tty device\n");
			break;
		}

		xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(xhci);
		break;
	case EVT_DONE:
		break;
	default:
		xhci_info(xhci, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}
static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long		flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32			reg;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	void __iomem		*base;
	int			dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* The xhci driver leaves DbC alone if it is already in use: */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	dbc->xhci = xhci;
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char		*p;
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}
static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);
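/*
 * Usage sketch for the "dbc" sysfs attribute above (the PCI device
 * path is illustrative and platform dependent):
 *
 *	echo enable  > /sys/bus/pci/devices/0000:00:14.0/dbc
 *	echo disable > /sys/bus/pci/devices/0000:00:14.0/dbc
 *	cat /sys/bus/pci/devices/0000:00:14.0/dbc	# prints the state
 */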
int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}
void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int			ret = 0;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */