/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

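/*
 * Pull up to @size bytes of pending console output from the port's write
 * kfifo into @packet.  Returns the number of bytes actually copied, which
 * may be less than requested if the fifo runs dry.
 */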
static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
        unsigned int len;

        len = kfifo_len(&port->write_fifo);
        if (len < size)
                size = len;
        if (size != 0)
                size = kfifo_out(&port->write_fifo, packet, size);

        return size;
}

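/*
 * Move buffered console output from the write fifo into requests taken
 * from the write pool and queue them on the DbC OUT endpoint.  port_lock
 * is dropped around dbc_ep_queue() (see the __releases/__acquires
 * annotations), presumably so the endpoint code can take its own locks
 * without nesting under port_lock.
 */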
static int dbc_start_tx(struct dbc_port *port)
        __releases(&port->port_lock)
        __acquires(&port->port_lock)
{
        int len;
        struct dbc_request *req;
        int status = 0;
        bool do_tty_wake = false;
        struct list_head *pool = &port->write_pool;

        while (!list_empty(pool)) {
                req = list_entry(pool->next, struct dbc_request, list_pool);
                len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
                if (len == 0)
                        break;
                do_tty_wake = true;

                req->length = len;
                list_del(&req->list_pool);

                spin_unlock(&port->port_lock);
                status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);

                if (status) {
                        list_add(&req->list_pool, pool);
                        break;
                }
        }

        if (do_tty_wake && port->port.tty)
                tty_wakeup(port->port.tty);

        return status;
}

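/*
 * Refill the DbC IN endpoint with read requests from the read pool so that
 * incoming data from the debug host can be received.  Stops as soon as no
 * tty is attached or an endpoint queue attempt fails.
 */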
static void dbc_start_rx(struct dbc_port *port)
        __releases(&port->port_lock)
        __acquires(&port->port_lock)
{
        struct dbc_request *req;
        int status;
        struct list_head *pool = &port->read_pool;

        while (!list_empty(pool)) {
                if (!port->port.tty)
                        break;

                req = list_entry(pool->next, struct dbc_request, list_pool);
                list_del(&req->list_pool);
                req->length = DBC_MAX_PACKET;

                spin_unlock(&port->port_lock);
                status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);

                if (status) {
                        list_add(&req->list_pool, pool);
                        break;
                }
        }
}

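/*
 * Request completion callbacks, called from the DbC event handling path.
 * Completed read requests are parked on read_queue and handed to the tty
 * layer later from the dbc_rx_push tasklet; completed writes are returned
 * to the write pool and further buffered data is sent if available.
 */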
static void
dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
{
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        spin_lock(&port->port_lock);
        list_add_tail(&req->list_pool, &port->read_queue);
        tasklet_schedule(&port->push);
        spin_unlock(&port->port_lock);
}

static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
{
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        spin_lock(&port->port_lock);
        list_add(&req->list_pool, &port->write_pool);
        switch (req->status) {
        case 0:
                dbc_start_tx(port);
                break;
        case -ESHUTDOWN:
                break;
        default:
                xhci_warn(xhci, "unexpected write complete status %d\n",
                          req->status);
                break;
        }
        spin_unlock(&port->port_lock);
}

static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
{
        kfree(req->buf);
        dbc_free_request(dep, req);
}

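/*
 * Pre-allocate DBC_QUEUE_SIZE requests (each with a DBC_MAX_PACKET buffer)
 * for an endpoint and put them on @head, wiring up @fn as the completion
 * handler.  Succeeds as long as at least one request could be allocated.
 */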
static int
xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
                        void (*fn)(struct xhci_hcd *, struct dbc_request *))
{
        int i;
        struct dbc_request *req;

        for (i = 0; i < DBC_QUEUE_SIZE; i++) {
                req = dbc_alloc_request(dep, GFP_ATOMIC);
                if (!req)
                        break;

                req->length = DBC_MAX_PACKET;
                req->buf = kmalloc(req->length, GFP_KERNEL);
                if (!req->buf) {
                        xhci_dbc_free_req(dep, req);
                        break;
                }

                req->complete = fn;
                list_add_tail(&req->list_pool, head);
        }

        return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
{
        struct dbc_request *req;

        while (!list_empty(head)) {
                req = list_entry(head->next, struct dbc_request, list_pool);
                list_del(&req->list_pool);
                xhci_dbc_free_req(dep, req);
        }
}

static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
        struct dbc_port *port = driver->driver_state;

        tty->driver_data = port;

        return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
        struct dbc_port *port = tty->driver_data;

        return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
        struct dbc_port *port = tty->driver_data;

        tty_port_close(&port->port, tty, file);
}

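/*
 * tty write path: data is only copied into the write fifo under port_lock
 * and then pushed out asynchronously via dbc_start_tx(), so a write never
 * waits on the hardware; anything that does not fit in the fifo is simply
 * not accounted in the returned count.
 */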
static int dbc_tty_write(struct tty_struct *tty,
                         const unsigned char *buf,
                         int count)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&port->port_lock, flags);
        if (count)
                count = kfifo_in(&port->write_fifo, buf, count);
        dbc_start_tx(port);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;
        int status;

        spin_lock_irqsave(&port->port_lock, flags);
        status = kfifo_put(&port->write_fifo, ch);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&port->port_lock, flags);
        dbc_start_tx(port);
        spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_tty_write_room(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;
        int room;

        spin_lock_irqsave(&port->port_lock, flags);
        room = kfifo_avail(&port->write_fifo);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return room;
}

static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;
        int chars;

        spin_lock_irqsave(&port->port_lock, flags);
        chars = kfifo_len(&port->write_fifo);
        spin_unlock_irqrestore(&port->port_lock, flags);

        return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
        struct dbc_port *port = tty->driver_data;
        unsigned long flags;

        spin_lock_irqsave(&port->port_lock, flags);
        tasklet_schedule(&port->push);
        spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
        .install                = dbc_tty_install,
        .open                   = dbc_tty_open,
        .close                  = dbc_tty_close,
        .write                  = dbc_tty_write,
        .put_char               = dbc_tty_put_char,
        .flush_chars            = dbc_tty_flush_chars,
        .write_room             = dbc_tty_write_room,
        .chars_in_buffer        = dbc_tty_chars_in_buffer,
        .unthrottle             = dbc_tty_unthrottle,
};

static struct tty_driver *dbc_tty_driver;

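/*
 * Register a one-line tty driver for the debug capability.  With the name
 * "ttyDBC" and a single index, the port normally shows up on the target as
 * /dev/ttyDBC0 (the pr_warn strings elsewhere in this file assume the
 * same), using raw 9600 8N1 default termios.
 */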
int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
{
        int status;
        struct xhci_dbc *dbc = xhci->dbc;

        dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
                                          TTY_DRIVER_DYNAMIC_DEV);
        if (IS_ERR(dbc_tty_driver)) {
                status = PTR_ERR(dbc_tty_driver);
                dbc_tty_driver = NULL;
                return status;
        }

        dbc_tty_driver->driver_name = "dbc_serial";
        dbc_tty_driver->name = "ttyDBC";

        dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
        dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
        dbc_tty_driver->init_termios = tty_std_termios;
        dbc_tty_driver->init_termios.c_cflag =
                        B9600 | CS8 | CREAD | HUPCL | CLOCAL;
        dbc_tty_driver->init_termios.c_ispeed = 9600;
        dbc_tty_driver->init_termios.c_ospeed = 9600;
        dbc_tty_driver->driver_state = &dbc->port;

        tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

        status = tty_register_driver(dbc_tty_driver);
        if (status) {
                xhci_err(xhci,
                         "can't register dbc tty driver, err %d\n", status);
                put_tty_driver(dbc_tty_driver);
                dbc_tty_driver = NULL;
        }

        return status;
}

void xhci_dbc_tty_unregister_driver(void)
{
        tty_unregister_driver(dbc_tty_driver);
        put_tty_driver(dbc_tty_driver);
        dbc_tty_driver = NULL;
}

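/*
 * Tasklet that pushes completed read requests into the tty flip buffer.
 * port->n_read tracks how much of a partially consumed request has already
 * been inserted, so the same request can be resumed when a full flip
 * buffer or a throttled tty stopped us part way through.
 */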
static void dbc_rx_push(unsigned long _port)
{
        struct dbc_request *req;
        struct tty_struct *tty;
        bool do_push = false;
        bool disconnect = false;
        struct dbc_port *port = (void *)_port;
        struct list_head *queue = &port->read_queue;

        spin_lock_irq(&port->port_lock);
        tty = port->port.tty;
        while (!list_empty(queue)) {
                req = list_first_entry(queue, struct dbc_request, list_pool);

                if (tty && tty_throttled(tty))
                        break;

                switch (req->status) {
                case 0:
                        break;
                case -ESHUTDOWN:
                        disconnect = true;
                        break;
                default:
                        pr_warn("ttyDBC0: unexpected RX status %d\n",
                                req->status);
                        break;
                }

                if (req->actual) {
                        char *packet = req->buf;
                        unsigned int n, size = req->actual;
                        int count;

                        n = port->n_read;
                        if (n) {
                                packet += n;
                                size -= n;
                        }

                        count = tty_insert_flip_string(&port->port, packet,
                                                       size);
                        if (count)
                                do_push = true;
                        if (count != size) {
                                /* stop pushing; tty layer can't handle more */
                                port->n_read += count;
                                break;
                        }
                        port->n_read = 0;
                }

                list_move(&req->list_pool, &port->read_pool);
        }

        if (do_push)
                tty_flip_buffer_push(&port->port);

        if (!list_empty(queue) && tty) {
                if (!tty_throttled(tty)) {
                        if (do_push)
                                tasklet_schedule(&port->push);
                        else
                                pr_warn("ttyDBC0: RX not scheduled?\n");
                }
        }

        if (!disconnect)
                dbc_start_rx(port);

        spin_unlock_irq(&port->port_lock);
}

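/*
 * Called by the tty port core on first open; start queuing read requests
 * so that data can flow as soon as the port is opened.
 */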
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
        struct dbc_port *port = container_of(_port, struct dbc_port, port);

        spin_lock_irq(&port->port_lock);
        dbc_start_rx(port);
        spin_unlock_irq(&port->port_lock);

        return 0;
}

static const struct tty_port_operations dbc_port_ops = {
        .activate = dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
{
        tty_port_init(&port->port);
        spin_lock_init(&port->port_lock);
        tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
        INIT_LIST_HEAD(&port->read_pool);
        INIT_LIST_HEAD(&port->read_queue);
        INIT_LIST_HEAD(&port->write_pool);

        port->in = get_in_ep(xhci);
        port->out = get_out_ep(xhci);
        port->port.ops = &dbc_port_ops;
        port->n_read = 0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
        tasklet_kill(&port->push);
        tty_port_destroy(&port->port);
}

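/*
 * Create the tty device node and allocate the write fifo plus the read and
 * write request pools.  On failure the error path unwinds in reverse order
 * through the labels below.
 */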
int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
{
        int ret;
        struct device *tty_dev;
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        xhci_dbc_tty_init_port(xhci, port);
        tty_dev = tty_port_register_device(&port->port,
                                           dbc_tty_driver, 0, NULL);
        ret = IS_ERR_OR_NULL(tty_dev);
        if (ret)
                goto register_fail;

        ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
        if (ret)
                goto buf_alloc_fail;

        ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
                                      dbc_read_complete);
        if (ret)
                goto request_fail;

        ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
                                      dbc_write_complete);
        if (ret)
                goto request_fail;

        port->registered = true;

        return 0;

request_fail:
        xhci_dbc_free_requests(port->in, &port->read_pool);
        xhci_dbc_free_requests(port->out, &port->write_pool);
        kfifo_free(&port->write_fifo);

buf_alloc_fail:
        tty_unregister_device(dbc_tty_driver, 0);

register_fail:
        xhci_dbc_tty_exit_port(port);

        xhci_err(xhci, "can't register tty port, err %d\n", ret);

        return ret;
}

void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        tty_unregister_device(dbc_tty_driver, 0);
        xhci_dbc_tty_exit_port(port);
        port->registered = false;

        kfifo_free(&port->write_fifo);
        xhci_dbc_free_requests(get_out_ep(xhci), &port->read_pool);
        xhci_dbc_free_requests(get_out_ep(xhci), &port->read_queue);
        xhci_dbc_free_requests(get_in_ep(xhci), &port->write_pool);
}