// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
 */
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/splice.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_console.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
#define VIRTCONS_MAX_PORTS 0x8000
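/*
 * The max_nr_ports value advertised by the host in the config space is
 * sanity-checked against VIRTCONS_MAX_PORTS in virtcons_probe() before
 * any per-port virtqueues are allocated.
 */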
/*
 * This is a global struct for storing common data for all the devices
 * this driver handles.
 *
 * Mainly, it has a linked list for all the consoles in one place so
 * that callbacks from hvc for get_chars(), put_chars() work properly
 * across multiple devices and multiple ports per device.
 */
struct ports_driver_data {
	/* Used for exporting per-port information to debugfs */
	struct dentry *debugfs_dir;

	/* List of all the devices we're handling */
	struct list_head portdevs;

	/* All the console devices handled by this driver */
	struct list_head consoles;
};

static struct ports_driver_data pdrvdata;
static const struct class port_class = {
	.name = "virtio-ports",
};

static DEFINE_SPINLOCK(pdrvdata_lock);
static DECLARE_COMPLETION(early_console_added);
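/*
 * early_console_added is completed from handle_control_message() once
 * the host marks a port as a console (VIRTIO_CONSOLE_CONSOLE_PORT).
 */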
/* This struct holds information that's relevant only for console ports */
struct console {
	/* We'll place all consoles in a list in the pdrvdata struct */
	struct list_head list;

	/* The hvc device associated with this console port */
	struct hvc_struct *hvc;

	/* The size of the console */
	struct winsize ws;

	/*
	 * This number identifies the number that we used to register
	 * with hvc in hvc_instantiate() and hvc_alloc(); this is the
	 * number passed on by the hvc callbacks to us to
	 * differentiate between the other console ports handled by
	 * this driver.
	 */
	u32 vtermno;
};

static DEFINE_IDA(vtermno_ida);
struct port_buffer {
	char *buf;

	/* size of the buffer in *buf above */
	size_t size;

	/* used length of the buffer */
	size_t len;
	/* offset in the buf from which to consume data */
	size_t offset;

	/* DMA address of buffer */
	dma_addr_t dma;

	/* Device we got DMA memory from */
	struct device *dev;

	/* List of pending dma buffers to free */
	struct list_head list;

	/* If sgpages == 0 then buf is used */
	unsigned int sgpages;

	/* sg is used if sgpages > 0. sg must be the last field in this struct */
	struct scatterlist sg[] __counted_by(sgpages);
};
/*
 * This is a per-device struct that stores data common to all the
 * ports for that device (vdev->priv).
 */
struct ports_device {
	/* Next portdev in the list, head is in the pdrvdata struct */
	struct list_head list;

	/*
	 * Workqueue handlers where we process deferred work after
	 * an interrupt
	 */
	struct work_struct control_work;
	struct work_struct config_work;

	struct list_head ports;

	/* To protect the list of ports */
	spinlock_t ports_lock;

	/* To protect the vq operations for the control channel */
	spinlock_t c_ivq_lock;
	spinlock_t c_ovq_lock;

	/* max. number of ports this device can hold */
	u32 max_nr_ports;

	/* The virtio device we're associated with */
	struct virtio_device *vdev;

	/*
	 * A couple of virtqueues for the control channel: one for
	 * guest->host transfers, one for host->guest transfers
	 */
	struct virtqueue *c_ivq, *c_ovq;

	/*
	 * A control packet buffer for guest->host requests, protected
	 * by c_ovq_lock.
	 */
	struct virtio_console_control cpkt;

	/* Array of per-port IO virtqueues */
	struct virtqueue **in_vqs, **out_vqs;

	/* Major number for this device.  Ports will be created as minors. */
	int chr_major;
};

struct port_stats {
	unsigned long bytes_sent, bytes_received, bytes_discarded;
};
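/*
 * Rough life cycle of a multiport device, as driven over the control
 * virtqueues above: the guest announces itself with
 * VIRTIO_CONSOLE_DEVICE_READY, the host adds ports with
 * VIRTIO_CONSOLE_PORT_ADD, the guest acks each port with
 * VIRTIO_CONSOLE_PORT_READY, and the host may then send
 * VIRTIO_CONSOLE_CONSOLE_PORT, VIRTIO_CONSOLE_PORT_NAME and
 * VIRTIO_CONSOLE_PORT_OPEN for it.  See handle_control_message().
 */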
/* This struct holds the per-port data */
struct port {
	/* Next port in the list, head is in the ports_device */
	struct list_head list;

	/* Pointer to the parent virtio_console device */
	struct ports_device *portdev;

	/* The current buffer from which data has to be fed to readers */
	struct port_buffer *inbuf;

	/*
	 * To protect the operations on the in_vq associated with this
	 * port.  Has to be a spinlock because it can be called from
	 * interrupt context (get_char()).
	 */
	spinlock_t inbuf_lock;

	/* Protect the operations on the out_vq. */
	spinlock_t outvq_lock;

	/* The IO vqs for this port */
	struct virtqueue *in_vq, *out_vq;

	/* File in the debugfs directory that exposes this port's information */
	struct dentry *debugfs_file;

	/*
	 * Keep count of the bytes sent, received and discarded for
	 * this port for accounting and debugging purposes.  These
	 * counts are not reset across port open / close events.
	 */
	struct port_stats stats;

	/*
	 * The entries in this struct will be valid if this port is
	 * hooked up to an hvc console
	 */
	struct console cons;

	/* Each port associates with a separate char device */
	struct cdev *cdev;
	struct device *dev;

	/* Reference-counting to handle port hot-unplugs and file operations */
	struct kref kref;

	/* A waitqueue for poll() or blocking read operations */
	wait_queue_head_t waitqueue;

	/* The 'name' of the port that we expose via sysfs properties */
	char *name;

	/* We can notify apps of host connect / disconnect events via SIGIO */
	struct fasync_struct *async_queue;

	/* The 'id' to identify the port with the Host */
	u32 id;

	bool outvq_full;

	/* Is the host device open */
	bool host_connected;

	/* We should allow only one process to open a port */
	bool guest_connected;
};
static struct port *find_port_by_vtermno(u32 vtermno)
{
	struct port *port;
	struct console *cons;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(cons, &pdrvdata.consoles, list) {
		if (cons->vtermno == vtermno) {
			port = container_of(cons, struct port, cons);
			goto out;
		}
	}
	port = NULL;
out:
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return port;
}
static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
						 dev_t dev)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list) {
		if (port->cdev->dev == dev) {
			kref_get(&port->kref);
			goto out;
		}
	}
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);

	return port;
}
static struct port *find_port_by_devt(dev_t dev)
{
	struct ports_device *portdev;
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
		port = find_port_by_devt_in_portdev(portdev, dev);
		if (port)
			goto out;
	}
	port = NULL;
out:
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return port;
}
static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->id == id)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);

	return port;
}
static struct port *find_port_by_vq(struct ports_device *portdev,
				    struct virtqueue *vq)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->in_vq == vq || port->out_vq == vq)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);
	return port;
}
static bool is_console_port(struct port *port)
{
	/* A port is a console port once it has an hvc device attached */
	return port->cons.hvc != NULL;
}
static bool is_rproc_serial(const struct virtio_device *vdev)
{
	return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
}
static inline bool use_multiport(struct ports_device *portdev)
{
	/*
	 * This condition can be true when put_chars is called from
	 * early_init
	 */
	if (!portdev->vdev)
		return false;
	return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
}
static DEFINE_SPINLOCK(dma_bufs_lock);
static LIST_HEAD(pending_free_dma_bufs);
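/*
 * rproc_serial buffers are DMA-coherent and cannot be freed from atomic
 * context, so free_buf() parks them on pending_free_dma_bufs when it
 * can't sleep; reclaim_dma_bufs() frees them later from process context.
 */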
static void free_buf(struct port_buffer *buf, bool can_sleep)
{
	unsigned int i;

	for (i = 0; i < buf->sgpages; i++) {
		struct page *page = sg_page(&buf->sg[i]);

		if (!page)
			break;
		put_page(page);
	}

	if (!buf->dev) {
		kfree(buf->buf);
	} else if (is_rproc_enabled) {
		unsigned long flags;

		/* dma_free_coherent requires interrupts to be enabled. */
		if (!can_sleep) {
			/* queue up dma-buffers to be freed later */
			spin_lock_irqsave(&dma_bufs_lock, flags);
			list_add_tail(&buf->list, &pending_free_dma_bufs);
			spin_unlock_irqrestore(&dma_bufs_lock, flags);
			return;
		}
		dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);

		/* Release device refcnt and allow it to be freed */
		put_device(buf->dev);
	}

	kfree(buf);
}
static void reclaim_dma_bufs(void)
{
	unsigned long flags;
	struct port_buffer *buf, *tmp;
	LIST_HEAD(tmp_list);

	if (list_empty(&pending_free_dma_bufs))
		return;

	/* Create a copy of the pending_free_dma_bufs while holding the lock */
	spin_lock_irqsave(&dma_bufs_lock, flags);
	list_cut_position(&tmp_list, &pending_free_dma_bufs,
			  pending_free_dma_bufs.prev);
	spin_unlock_irqrestore(&dma_bufs_lock, flags);

	/* Release the dma buffers, without irqs enabled */
	list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
		list_del(&buf->list);
		free_buf(buf, true);
	}
}
static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
				     int pages)
{
	struct port_buffer *buf;

	reclaim_dma_bufs();

	/*
	 * Allocate buffer and the sg list. The sg list array is allocated
	 * directly after the port_buffer struct.
	 */
	buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL);
	if (!buf)
		goto fail;

	buf->sgpages = pages;
	if (pages > 0) {
		buf->dev = NULL;
		buf->buf = NULL;
		return buf;
	}

	if (is_rproc_serial(vdev)) {
		/*
		 * Allocate DMA memory from ancestor. When a virtio
		 * device is created by remoteproc, the DMA memory is
		 * associated with the parent device:
		 * virtioY => remoteprocX#vdevYbuffer.
		 */
		buf->dev = vdev->dev.parent;
		if (!buf->dev)
			goto free_buf;

		/* Increase device refcnt to avoid freeing it */
		get_device(buf->dev);
		buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
					      GFP_KERNEL);
	} else {
		buf->dev = NULL;
		buf->buf = kmalloc(buf_size, GFP_KERNEL);
	}

	if (!buf->buf)
		goto free_buf;
	buf->len = 0;
	buf->offset = 0;
	buf->size = buf_size;
	return buf;

free_buf:
	kfree(buf);
fail:
	return NULL;
}
/* Callers should take appropriate locks */
static struct port_buffer *get_inbuf(struct port *port)
{
	struct port_buffer *buf;
	unsigned int len;

	if (port->inbuf)
		return port->inbuf;

	buf = virtqueue_get_buf(port->in_vq, &len);
	if (buf) {
		buf->len = min_t(size_t, len, buf->size);
		buf->offset = 0;
		port->stats.bytes_received += len;
	}
	return buf;
}
/*
 * Create a scatter-gather list representing our input buffer and put
 * it in the queue.
 *
 * Callers should take appropriate locks.
 */
static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
{
	struct scatterlist sg[1];
	int ret;

	sg_init_one(sg, buf->buf, buf->size);

	ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
	virtqueue_kick(vq);
	if (!ret)
		ret = vq->num_free;
	return ret;
}
/*
 * Discard any unread data this port has.  Callers should take
 * appropriate locks.
 */
static void discard_port_data(struct port *port)
{
	struct port_buffer *buf;
	unsigned int err;

	if (!port->portdev) {
		/* Device has been unplugged.  vqs are already gone. */
		return;
	}
	buf = get_inbuf(port);

	err = 0;
	while (buf) {
		port->stats.bytes_discarded += buf->len - buf->offset;
		if (add_inbuf(port->in_vq, buf) < 0) {
			err++;
			free_buf(buf, false);
		}
		port->inbuf = NULL;
		buf = get_inbuf(port);
	}
	if (err)
		dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
			 err);
}
static bool port_has_data(struct port *port)
{
	unsigned long flags;
	bool ret;

	ret = false;
	spin_lock_irqsave(&port->inbuf_lock, flags);
	port->inbuf = get_inbuf(port);
	if (port->inbuf)
		ret = true;

	spin_unlock_irqrestore(&port->inbuf_lock, flags);
	return ret;
}
static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
				  unsigned int event, unsigned int value)
{
	struct scatterlist sg[1];
	struct virtqueue *vq;
	unsigned int len;

	if (!use_multiport(portdev))
		return 0;

	vq = portdev->c_ovq;

	spin_lock(&portdev->c_ovq_lock);

	portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
	portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
	portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);

	sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));

	if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
		virtqueue_kick(vq);
		while (!virtqueue_get_buf(vq, &len)
			&& !virtqueue_is_broken(vq))
			cpu_relax();
	}

	spin_unlock(&portdev->c_ovq_lock);
	return 0;
}
static ssize_t send_control_msg(struct port *port, unsigned int event,
				unsigned int value)
{
	/* Did the port get unplugged before userspace closed it? */
	if (port->portdev)
		return __send_control_msg(port->portdev, port->id, event, value);
	return 0;
}
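/*
 * Typical uses of the helpers above: port_fops_open() and
 * init_port_console() send VIRTIO_CONSOLE_PORT_OPEN with value 1, and
 * port_fops_release() sends it with value 0, so the host side can track
 * guest opens and closes of a port.
 */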
/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
	struct port_buffer *buf;
	unsigned int len;

	if (!port->portdev) {
		/* Device has been unplugged.  vqs are already gone. */
		return;
	}
	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
		free_buf(buf, false);
		port->outvq_full = false;
	}
}
static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
			      int nents, size_t in_count,
			      void *data, bool nonblock)
{
	struct virtqueue *out_vq;
	int err;
	unsigned long flags;
	unsigned int len;

	out_vq = port->out_vq;

	spin_lock_irqsave(&port->outvq_lock, flags);

	reclaim_consumed_buffers(port);

	err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);

	/* Tell Host to go! */
	virtqueue_kick(out_vq);

	if (err) {
		in_count = 0;
		goto done;
	}

	if (out_vq->num_free == 0)
		port->outvq_full = true;

	if (nonblock)
		goto done;

	/*
	 * Wait till the host acknowledges it pushed out the data we
	 * sent.  This is done for data from the hvc_console; the tty
	 * operations are performed with spinlocks held so we can't
	 * sleep here.  An alternative would be to copy the data to a
	 * buffer and relax the spinning requirement.  The downside is
	 * we need to kmalloc a GFP_ATOMIC buffer each time the
	 * console driver writes something out.
	 */
	while (!virtqueue_get_buf(out_vq, &len)
		&& !virtqueue_is_broken(out_vq))
		cpu_relax();
done:
	spin_unlock_irqrestore(&port->outvq_lock, flags);

	port->stats.bytes_sent += in_count;
	/*
	 * We're expected to return the amount of data we wrote -- all
	 * of it
	 */
	return in_count;
}
/*
 * Give out the data that's requested from the buffer that we have
 * queued up.
 */
static ssize_t fill_readbuf(struct port *port, u8 __user *out_buf,
			    size_t out_count, bool to_user)
{
	struct port_buffer *buf;
	unsigned long flags;

	if (!out_count || !port_has_data(port))
		return 0;

	buf = port->inbuf;
	out_count = min(out_count, buf->len - buf->offset);

	if (to_user) {
		ssize_t ret;

		ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
		if (ret)
			return -EFAULT;
	} else {
		memcpy((__force u8 *)out_buf, buf->buf + buf->offset,
		       out_count);
	}

	buf->offset += out_count;

	if (buf->offset == buf->len) {
		/*
		 * We're done using all the data in this buffer.
		 * Re-queue so that the Host can send us more data.
		 */
		spin_lock_irqsave(&port->inbuf_lock, flags);
		port->inbuf = NULL;

		if (add_inbuf(port->in_vq, buf) < 0)
			dev_warn(port->dev, "failed add_buf\n");

		spin_unlock_irqrestore(&port->inbuf_lock, flags);
	}
	/* Return the number of bytes actually copied */
	return out_count;
}
/* The condition that must be true for polling to end */
static bool will_read_block(struct port *port)
{
	if (!port->guest_connected) {
		/* Port got hot-unplugged. Let's exit. */
		return false;
	}
	return !port_has_data(port) && port->host_connected;
}
static bool will_write_block(struct port *port)
{
	bool ret;

	if (!port->guest_connected) {
		/* Port got hot-unplugged. Let's exit. */
		return false;
	}
	if (!port->host_connected)
		return true;

	spin_lock_irq(&port->outvq_lock);
	/*
	 * Check if the Host has consumed any buffers since we last
	 * sent data (this is only applicable for nonblocking ports).
	 */
	reclaim_consumed_buffers(port);
	ret = port->outvq_full;
	spin_unlock_irq(&port->outvq_lock);

	return ret;
}
static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
			      size_t count, loff_t *offp)
{
	struct port *port;
	ssize_t ret;

	port = filp->private_data;

	/* Port is hot-unplugged. */
	if (!port->guest_connected)
		return -ENODEV;

	if (!port_has_data(port)) {
		/*
		 * If nothing's connected on the host just return 0 in
		 * case of list_empty; this tells the userspace app
		 * that there's no connection
		 */
		if (!port->host_connected)
			return 0;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_freezable(port->waitqueue,
					   !will_read_block(port));
		if (ret < 0)
			return ret;
	}
	/* Port got hot-unplugged while we were waiting above. */
	if (!port->guest_connected)
		return -ENODEV;
	/*
	 * We could've received a disconnection message while we were
	 * waiting for more data.
	 *
	 * This check is not clubbed in the if() statement above as we
	 * might receive some data as well as the host could get
	 * disconnected after we got woken up from our wait.  So we
	 * really want to give off whatever data we have and only then
	 * check for host_connected.
	 */
	if (!port_has_data(port) && !port->host_connected)
		return 0;

	return fill_readbuf(port, ubuf, count, true);
}
static int wait_port_writable(struct port *port, bool nonblock)
{
	int ret;

	if (will_write_block(port)) {
		if (nonblock)
			return -EAGAIN;

		ret = wait_event_freezable(port->waitqueue,
					   !will_write_block(port));
		if (ret < 0)
			return ret;
	}
	/* Port got hot-unplugged. */
	if (!port->guest_connected)
		return -ENODEV;

	return 0;
}
static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct port *port;
	struct port_buffer *buf;
	ssize_t ret;
	bool nonblock;
	struct scatterlist sg[1];

	/* Userspace could be out to fool us */
	if (!count)
		return 0;

	port = filp->private_data;

	nonblock = filp->f_flags & O_NONBLOCK;

	ret = wait_port_writable(port, nonblock);
	if (ret < 0)
		return ret;

	count = min((size_t)(32 * 1024), count);

	buf = alloc_buf(port->portdev->vdev, count, 0);
	if (!buf)
		return -ENOMEM;

	ret = copy_from_user(buf->buf, ubuf, count);
	if (ret) {
		ret = -EFAULT;
		goto free_buf;
	}

	/*
	 * We now ask send_buf() to not spin for generic ports -- we
	 * can re-use the same code path that non-blocking file
	 * descriptors take for blocking file descriptors since the
	 * wait is already done and we're certain the write will go
	 * through to the host.
	 */
	nonblock = true;
	sg_init_one(sg, buf->buf, count);
	ret = __send_to_port(port, sg, 1, count, buf, nonblock);

	if (nonblock && ret > 0)
		goto out;

free_buf:
	free_buf(buf, true);
out:
	return ret;
}
struct sg_list {
	unsigned int n;
	unsigned int size;
	size_t len;
	struct scatterlist *sg;
};
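/*
 * pipe_to_sg() fills an sg_list one pipe buffer at a time: it steals the
 * page for zero-copy when it can, and falls back to copying the data
 * into a freshly allocated page otherwise.
 */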
static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		      struct splice_desc *sd)
{
	struct sg_list *sgl = sd->u.data;
	unsigned int offset, len;

	if (sgl->n == sgl->size)
		return 0;

	/* Try lock this page */
	if (pipe_buf_try_steal(pipe, buf)) {
		/* Get reference and unlock page for moving */
		get_page(buf->page);
		unlock_page(buf->page);

		len = min(buf->len, sd->len);
		sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
	} else {
		/* Fall back to copying a page */
		struct page *page = alloc_page(GFP_KERNEL);
		char *src;

		if (!page)
			return -ENOMEM;

		offset = sd->pos & ~PAGE_MASK;

		len = sd->len;
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		src = kmap_atomic(buf->page);
		memcpy(page_address(page) + offset, src + buf->offset, len);
		kunmap_atomic(src);

		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
	}
	sgl->n++;
	sgl->len += len;

	return len;
}
/* Faster zero-copy write by splicing */
static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
				      struct file *filp, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	struct port *port = filp->private_data;
	struct sg_list sgl;
	ssize_t ret;
	struct port_buffer *buf;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.data = &sgl,
	};
	unsigned int occupancy;

	/*
	 * Rproc_serial does not yet support splice. To support splice
	 * pipe_to_sg() must allocate dma-buffers and copy content from
	 * regular pages to dma pages. And alloc_buf and free_buf must
	 * support allocating and freeing such a list of dma-buffers.
	 */
	if (is_rproc_serial(port->out_vq->vdev))
		return -EINVAL;

	pipe_lock(pipe);
	ret = 0;
	if (pipe_empty(pipe->head, pipe->tail))
		goto error_out;

	ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
	if (ret < 0)
		goto error_out;

	occupancy = pipe_occupancy(pipe->head, pipe->tail);
	buf = alloc_buf(port->portdev->vdev, 0, occupancy);

	if (!buf) {
		ret = -ENOMEM;
		goto error_out;
	}

	sgl.n = 0;
	sgl.len = 0;
	sgl.size = occupancy;
	sgl.sg = buf->sg;
	sg_init_table(sgl.sg, sgl.size);
	ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
	pipe_unlock(pipe);
	if (likely(ret > 0))
		ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);

	if (unlikely(ret <= 0))
		free_buf(buf, true);
	return ret;

error_out:
	pipe_unlock(pipe);
	return ret;
}
static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
{
	struct port *port;
	__poll_t ret;

	port = filp->private_data;
	poll_wait(filp, &port->waitqueue, wait);

	if (!port->guest_connected) {
		/* Port got unplugged */
		return EPOLLHUP;
	}
	ret = 0;
	if (!will_read_block(port))
		ret |= EPOLLIN | EPOLLRDNORM;
	if (!will_write_block(port))
		ret |= EPOLLOUT;
	if (!port->host_connected)
		ret |= EPOLLHUP;

	return ret;
}

static void remove_port(struct kref *kref);
static int port_fops_release(struct inode *inode, struct file *filp)
{
	struct port *port;

	port = filp->private_data;

	/* Notify host of port being closed */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);

	spin_lock_irq(&port->inbuf_lock);
	port->guest_connected = false;

	discard_port_data(port);

	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	reclaim_dma_bufs();
	/*
	 * Locks aren't necessary here as a port can't be opened after
	 * unplug, and if a port isn't unplugged, a kref would already
	 * exist for the port.  Plus, taking ports_lock here would
	 * create a dependency on other locks taken by functions
	 * inside remove_port if we're the last holder of the port,
	 * creating many problems.
	 */
	kref_put(&port->kref, remove_port);

	return 0;
}
static int port_fops_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct port *port;
	int ret;

	/* We get the port with a kref here */
	port = find_port_by_devt(cdev->dev);
	if (!port) {
		/* Port was unplugged before we could proceed */
		return -ENXIO;
	}
	filp->private_data = port;

	/*
	 * Don't allow opening of console port devices -- that's done
	 * via /dev/hvc
	 */
	if (is_console_port(port)) {
		ret = -ENXIO;
		goto out;
	}

	/* Allow only one process to open a particular port at a time */
	spin_lock_irq(&port->inbuf_lock);
	if (port->guest_connected) {
		spin_unlock_irq(&port->inbuf_lock);
		ret = -EBUSY;
		goto out;
	}

	port->guest_connected = true;
	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	/*
	 * There might be a chance that we missed reclaiming a few
	 * buffers in the window of the port getting previously closed
	 * and opening now.
	 */
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	nonseekable_open(inode, filp);

	/* Notify host of port being opened */
	send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
out:
	kref_put(&port->kref, remove_port);
	return ret;
}
static int port_fops_fasync(int fd, struct file *filp, int mode)
{
	struct port *port;

	port = filp->private_data;
	return fasync_helper(fd, filp, mode, &port->async_queue);
}
/*
 * The file operations that we support: programs in the guest can open
 * a console device, read from it, write to it, poll for data and
 * close it.  The devices are at
 *   /dev/vport<device number>p<port number>
 */
static const struct file_operations port_fops = {
	.owner = THIS_MODULE,
	.open  = port_fops_open,
	.read  = port_fops_read,
	.write = port_fops_write,
	.splice_write = port_fops_splice_write,
	.poll  = port_fops_poll,
	.release = port_fops_release,
	.fasync = port_fops_fasync,
};
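/*
 * For example, a guest agent would open /dev/vport0p1 (virtio device 0,
 * port 1) and then use read()/write()/poll() on that fd; the optional
 * port name sent by the host shows up as the 'name' sysfs attribute of
 * the corresponding device in the "virtio-ports" class.
 */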
/*
 * The put_chars() callback is pretty straightforward.
 *
 * We turn the characters into a scatter-gather list, add it to the
 * output queue and then kick the Host.  Then we sit here waiting for
 * it to finish: inefficient in theory, but in practice
 * implementations will do it immediately.
 */
static ssize_t put_chars(u32 vtermno, const u8 *buf, size_t count)
{
	struct port *port;
	struct scatterlist sg[1];
	void *data;
	int ret;

	port = find_port_by_vtermno(vtermno);
	if (!port)
		return 0;

	data = kmemdup(buf, count, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	sg_init_one(sg, data, count);
	ret = __send_to_port(port, sg, 1, count, data, false);
	kfree(data);
	return ret;
}
/*
 * get_chars() is the callback from the hvc_console infrastructure
 * when an interrupt is received.
 *
 * We call out to fill_readbuf that gets us the required data from the
 * buffers that are queued up.
 */
static ssize_t get_chars(u32 vtermno, u8 *buf, size_t count)
{
	struct port *port;

	port = find_port_by_vtermno(vtermno);
	if (!port)
		return 0;

	/* If we don't have an input queue yet, we can't get input. */
	BUG_ON(!port->in_vq);

	return fill_readbuf(port, (__force u8 __user *)buf, count, false);
}
static void resize_console(struct port *port)
{
	struct virtio_device *vdev;

	/* The port could have been hot-unplugged */
	if (!port || !is_console_port(port))
		return;

	vdev = port->portdev->vdev;

	/* Don't test F_SIZE at all if we're rproc: not a valid feature! */
	if (!is_rproc_serial(vdev) &&
	    virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
		hvc_resize(port->cons.hvc, port->cons.ws);
}
/* We set the configuration at this point, since we now have a tty */
static int notifier_add_vio(struct hvc_struct *hp, int data)
{
	struct port *port;

	port = find_port_by_vtermno(hp->vtermno);
	if (!port)
		return -EINVAL;

	hp->irq_requested = 1;
	resize_console(port);

	return 0;
}

static void notifier_del_vio(struct hvc_struct *hp, int data)
{
	hp->irq_requested = 0;
}
/* The operations for console ports. */
static const struct hv_ops hv_ops = {
	.get_chars = get_chars,
	.put_chars = put_chars,
	.notifier_add = notifier_add_vio,
	.notifier_del = notifier_del_vio,
	.notifier_hangup = notifier_del_vio,
};
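/*
 * These callbacks back the hvc console (/dev/hvcN) for console ports.
 * put_chars() is called with hvc spinlocks held, which is why
 * __send_to_port() busy-waits instead of sleeping for console writes.
 */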
static int init_port_console(struct port *port)
{
	int ret;

	/*
	 * The Host's telling us this port is a console port.  Hook it
	 * up with an hvc console.
	 *
	 * To set up and manage our virtual console, we call
	 * hvc_alloc().
	 *
	 * The first argument of hvc_alloc() is the virtual console
	 * number.  The second argument is the parameter for the
	 * notification mechanism (like irq number).  We currently
	 * leave this as zero, virtqueues have implicit notifications.
	 *
	 * The third argument is a "struct hv_ops" containing the
	 * put_chars(), get_chars(), notifier_add() and notifier_del()
	 * pointers.  The final argument is the output buffer size: we
	 * can do any size, so we put PAGE_SIZE here.
	 */
	ret = ida_alloc_min(&vtermno_ida, 1, GFP_KERNEL);
	if (ret < 0)
		return ret;

	port->cons.vtermno = ret;
	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
	if (IS_ERR(port->cons.hvc)) {
		ret = PTR_ERR(port->cons.hvc);
		dev_err(port->dev,
			"error %d allocating hvc for port\n", ret);
		port->cons.hvc = NULL;
		ida_free(&vtermno_ida, port->cons.vtermno);
		return ret;
	}
	spin_lock_irq(&pdrvdata_lock);
	list_add_tail(&port->cons.list, &pdrvdata.consoles);
	spin_unlock_irq(&pdrvdata_lock);
	port->guest_connected = true;

	/* Notify host of port being opened */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
}
static ssize_t show_port_name(struct device *dev,
			      struct device_attribute *attr, char *buffer)
{
	struct port *port;

	port = dev_get_drvdata(dev);

	return sprintf(buffer, "%s\n", port->name);
}

static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);

static struct attribute *port_sysfs_entries[] = {
	&dev_attr_name.attr,
	NULL
};

static const struct attribute_group port_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = port_sysfs_entries,
};
static int port_debugfs_show(struct seq_file *s, void *data)
{
	struct port *port = s->private;

	seq_printf(s, "name: %s\n", port->name ? port->name : "");
	seq_printf(s, "guest_connected: %d\n", port->guest_connected);
	seq_printf(s, "host_connected: %d\n", port->host_connected);
	seq_printf(s, "outvq_full: %d\n", port->outvq_full);
	seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
	seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
	seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
	seq_printf(s, "is_console: %s\n",
		   is_console_port(port) ? "yes" : "no");
	seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(port_debugfs);
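/*
 * add_port() exposes this as the debugfs file virtio-ports/vport<N>p<M>,
 * handy for checking a port's connection state and byte counters.
 */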
static void set_console_size(struct port *port, u16 rows, u16 cols)
{
	if (!port || !is_console_port(port))
		return;

	port->cons.ws.ws_row = rows;
	port->cons.ws.ws_col = cols;
}
static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
	struct port_buffer *buf;
	int nr_added_bufs;
	int ret;

	nr_added_bufs = 0;
	do {
		buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
		if (!buf)
			return -ENOMEM;

		spin_lock_irq(lock);
		ret = add_inbuf(vq, buf);
		if (ret < 0) {
			spin_unlock_irq(lock);
			free_buf(buf, true);
			return ret;
		}
		nr_added_bufs++;
		spin_unlock_irq(lock);
	} while (ret > 0);

	return nr_added_bufs;
}
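/*
 * add_inbuf() returns the number of slots still free in the vq, so the
 * loop above keeps queueing PAGE_SIZE buffers until the vq is full and
 * then reports how many buffers were added.
 */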
static void send_sigio_to_port(struct port *port)
{
	if (port->async_queue && port->guest_connected)
		kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
}
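/*
 * SIGIO goes only to processes that asked for it with FASYNC on the port
 * (see port_fops_fasync()), and only while the port is open in the guest.
 */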
static int add_port(struct ports_device *portdev, u32 id)
{
	char debugfs_name[16];
	struct port *port;
	dev_t devt;
	int err;

	port = kmalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		goto fail;
	}
	kref_init(&port->kref);

	port->portdev = portdev;
	port->id = id;

	port->name = NULL;
	port->inbuf = NULL;
	port->cons.hvc = NULL;
	port->async_queue = NULL;

	port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
	port->cons.vtermno = 0;

	port->host_connected = port->guest_connected = false;
	port->stats = (struct port_stats) { 0 };

	port->outvq_full = false;

	port->in_vq = portdev->in_vqs[port->id];
	port->out_vq = portdev->out_vqs[port->id];

	port->cdev = cdev_alloc();
	if (!port->cdev) {
		dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
		err = -ENOMEM;
		goto free_port;
	}
	port->cdev->ops = &port_fops;

	devt = MKDEV(portdev->chr_major, id);
	err = cdev_add(port->cdev, devt, 1);
	if (err < 0) {
		dev_err(&port->portdev->vdev->dev,
			"Error %d adding cdev for port %u\n", err, id);
		goto free_cdev;
	}
	port->dev = device_create(&port_class, &port->portdev->vdev->dev,
				  devt, port, "vport%up%u",
				  port->portdev->vdev->index, id);
	if (IS_ERR(port->dev)) {
		err = PTR_ERR(port->dev);
		dev_err(&port->portdev->vdev->dev,
			"Error %d creating device for port %u\n",
			err, id);
		goto free_cdev;
	}

	spin_lock_init(&port->inbuf_lock);
	spin_lock_init(&port->outvq_lock);
	init_waitqueue_head(&port->waitqueue);

	/* We can safely ignore ENOSPC because it means
	 * the queue already has buffers. Buffers are removed
	 * only by virtcons_remove(), not by unplug_port()
	 */
	err = fill_queue(port->in_vq, &port->inbuf_lock);
	if (err < 0 && err != -ENOSPC) {
		dev_err(port->dev, "Error allocating inbufs\n");
		goto free_device;
	}

	if (is_rproc_serial(port->portdev->vdev))
		/*
		 * For rproc_serial assume remote processor is connected.
		 * rproc_serial does not want the console port, only
		 * the generic port implementation.
		 */
		port->host_connected = true;
	else if (!use_multiport(port->portdev)) {
		/*
		 * If we're not using multiport support,
		 * this has to be a console port.
		 */
		err = init_port_console(port);
		if (err)
			goto free_device;
	}

	spin_lock_irq(&portdev->ports_lock);
	list_add_tail(&port->list, &port->portdev->ports);
	spin_unlock_irq(&portdev->ports_lock);

	/*
	 * Tell the Host we're set so that it can send us various
	 * configuration parameters for this port (eg, port name,
	 * caching, whether this is a console port, etc.)
	 */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);

	/*
	 * Finally, create the debugfs file that we can use to
	 * inspect a port's state at any time
	 */
	snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
		 port->portdev->vdev->index, id);
	port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
						 pdrvdata.debugfs_dir,
						 port, &port_debugfs_fops);
	return 0;

free_device:
	device_destroy(&port_class, port->dev->devt);
free_cdev:
	cdev_del(port->cdev);
free_port:
	kfree(port);
fail:
	/* The host might want to notify management sw about port add failure */
	__send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
	return err;
}
/* No users remain, remove all port-specific data. */
static void remove_port(struct kref *kref)
{
	struct port *port;

	port = container_of(kref, struct port, kref);

	kfree(port);
}
static void remove_port_data(struct port *port)
{
	spin_lock_irq(&port->inbuf_lock);
	/* Remove unused data this port might have received. */
	discard_port_data(port);
	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);
}
/*
 * Port got unplugged.  Remove port from portdev's list and drop the
 * kref reference.  If no userspace has this port opened, it will
 * result in immediate removal of the port.
 */
static void unplug_port(struct port *port)
{
	spin_lock_irq(&port->portdev->ports_lock);
	list_del(&port->list);
	spin_unlock_irq(&port->portdev->ports_lock);

	spin_lock_irq(&port->inbuf_lock);
	if (port->guest_connected) {
		/* Let the app know the port is going down. */
		send_sigio_to_port(port);

		/* Do this after sigio is actually sent */
		port->guest_connected = false;
		port->host_connected = false;

		wake_up_interruptible(&port->waitqueue);
	}
	spin_unlock_irq(&port->inbuf_lock);

	if (is_console_port(port)) {
		spin_lock_irq(&pdrvdata_lock);
		list_del(&port->cons.list);
		spin_unlock_irq(&pdrvdata_lock);
		hvc_remove(port->cons.hvc);
		ida_free(&vtermno_ida, port->cons.vtermno);
	}

	remove_port_data(port);

	/*
	 * We should just assume the device itself has gone off --
	 * else a close on an open port later will try to send out a
	 * control message.
	 */
	port->portdev = NULL;

	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
	device_destroy(&port_class, port->dev->devt);
	cdev_del(port->cdev);

	debugfs_remove(port->debugfs_file);
	kfree(port->name);

	/*
	 * Locks around here are not necessary - a port can't be
	 * opened after we removed the port struct from ports_list
	 * above.
	 */
	kref_put(&port->kref, remove_port);
}
/* Any private messages that the Host and Guest want to share */
static void handle_control_message(struct virtio_device *vdev,
				   struct ports_device *portdev,
				   struct port_buffer *buf)
{
	struct virtio_console_control *cpkt;
	struct port *port;
	size_t name_size;
	int err;

	cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);

	port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
	if (!port &&
	    cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
		/* No valid header at start of buffer.  Drop it. */
		dev_dbg(&portdev->vdev->dev,
			"Invalid index %u in control packet\n", cpkt->id);
		return;
	}

	switch (virtio16_to_cpu(vdev, cpkt->event)) {
	case VIRTIO_CONSOLE_PORT_ADD:
		if (port) {
			dev_dbg(&portdev->vdev->dev,
				"Port %u already added\n", port->id);
			send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
			break;
		}
		if (virtio32_to_cpu(vdev, cpkt->id) >=
		    portdev->max_nr_ports) {
			dev_warn(&portdev->vdev->dev,
				 "Request for adding port with "
				 "out-of-bound id %u, max. supported id: %u\n",
				 cpkt->id, portdev->max_nr_ports - 1);
			break;
		}
		add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
		break;
	case VIRTIO_CONSOLE_PORT_REMOVE:
		unplug_port(port);
		break;
	case VIRTIO_CONSOLE_CONSOLE_PORT:
		if (!cpkt->value)
			break;
		if (is_console_port(port))
			break;

		init_port_console(port);
		complete(&early_console_added);
		/*
		 * Could remove the port here in case init fails - but
		 * have to notify the host first.
		 */
		break;
	case VIRTIO_CONSOLE_RESIZE: {
		struct {
			__u16 rows;
			__u16 cols;
		} size;

		if (!is_console_port(port))
			break;

		memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
		       sizeof(size));
		set_console_size(port, size.rows, size.cols);

		port->cons.hvc->irq_requested = 1;
		resize_console(port);
		break;
	}
	case VIRTIO_CONSOLE_PORT_OPEN:
		port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
		wake_up_interruptible(&port->waitqueue);
		/*
		 * If the host port got closed and the host had any
		 * unconsumed buffers, we'll be able to reclaim them
		 * here.
		 */
		spin_lock_irq(&port->outvq_lock);
		reclaim_consumed_buffers(port);
		spin_unlock_irq(&port->outvq_lock);

		/*
		 * If the guest is connected, it'll be interested in
		 * knowing the host connection state changed.
		 */
		spin_lock_irq(&port->inbuf_lock);
		send_sigio_to_port(port);
		spin_unlock_irq(&port->inbuf_lock);
		break;
	case VIRTIO_CONSOLE_PORT_NAME:
		/*
		 * If we woke up after hibernation, we can get this
		 * again.  Skip it in that case.
		 */
		if (port->name)
			break;

		/*
		 * Skip the size of the header and the cpkt to get the size
		 * of the name that was sent
		 */
		name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;

		port->name = kmalloc(name_size, GFP_KERNEL);
		if (!port->name) {
			dev_err(port->dev,
				"Not enough space to store port name\n");
			break;
		}
		strscpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
			name_size);

		/*
		 * Since we only have one sysfs attribute, 'name',
		 * create it only if we have a name for the port.
		 */
		err = sysfs_create_group(&port->dev->kobj,
					 &port_attribute_group);
		if (err) {
			dev_err(port->dev,
				"Error %d creating sysfs device attributes\n",
				err);
		} else {
			/*
			 * Generate a udev event so that appropriate
			 * symlinks can be created based on udev
			 * rules.
			 */
			kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
		}
		break;
	}
}
static void control_work_handler(struct work_struct *work)
{
	struct ports_device *portdev;
	struct virtqueue *vq;
	struct port_buffer *buf;
	unsigned int len;

	portdev = container_of(work, struct ports_device, control_work);
	vq = portdev->c_ivq;

	spin_lock(&portdev->c_ivq_lock);
	while ((buf = virtqueue_get_buf(vq, &len))) {
		spin_unlock(&portdev->c_ivq_lock);

		buf->len = min_t(size_t, len, buf->size);
		buf->offset = 0;

		handle_control_message(vq->vdev, portdev, buf);

		spin_lock(&portdev->c_ivq_lock);
		if (add_inbuf(portdev->c_ivq, buf) < 0) {
			dev_warn(&portdev->vdev->dev,
				 "Error adding buffer to queue\n");
			free_buf(buf, false);
		}
	}
	spin_unlock(&portdev->c_ivq_lock);
}
static void flush_bufs(struct virtqueue *vq, bool can_sleep)
{
	struct port_buffer *buf;
	unsigned int len;

	while ((buf = virtqueue_get_buf(vq, &len)))
		free_buf(buf, can_sleep);
}
static void out_intr(struct virtqueue *vq)
{
	struct port *port;

	port = find_port_by_vq(vq->vdev->priv, vq);
	if (!port) {
		flush_bufs(vq, false);
		return;
	}

	wake_up_interruptible(&port->waitqueue);
}
static void in_intr(struct virtqueue *vq)
{
	struct port *port;
	unsigned long flags;

	port = find_port_by_vq(vq->vdev->priv, vq);
	if (!port) {
		flush_bufs(vq, false);
		return;
	}

	spin_lock_irqsave(&port->inbuf_lock, flags);
	port->inbuf = get_inbuf(port);

	/*
	 * Normally the port should not accept data when the port is
	 * closed. For generic serial ports, the host won't (shouldn't)
	 * send data till the guest is connected. But this condition
	 * can be reached when a console port is not yet connected (no
	 * tty is spawned) and the other side sends out data over the
	 * vring, or when a remote device starts sending data before
	 * the ports are opened.
	 *
	 * A generic serial port will discard data if not connected,
	 * while console ports and rproc-serial ports accept data at
	 * any time. rproc-serial is initialized with guest_connected
	 * set to false because port_fops_open expects this. Console
	 * ports are hooked up with an HVC console and are initialized
	 * with guest_connected set to true.
	 */

	if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
		discard_port_data(port);

	/* Send a SIGIO indicating new data in case the process asked for it */
	send_sigio_to_port(port);

	spin_unlock_irqrestore(&port->inbuf_lock, flags);

	wake_up_interruptible(&port->waitqueue);

	if (is_console_port(port) && hvc_poll(port->cons.hvc))
		hvc_kick();
}
static void control_intr(struct virtqueue *vq)
{
	struct ports_device *portdev;

	portdev = vq->vdev->priv;
	schedule_work(&portdev->control_work);
}
static void config_intr(struct virtio_device *vdev)
{
	struct ports_device *portdev;

	portdev = vdev->priv;

	if (!use_multiport(portdev))
		schedule_work(&portdev->config_work);
}
static void config_work_handler(struct work_struct *work)
{
	struct ports_device *portdev;

	portdev = container_of(work, struct ports_device, config_work);
	if (!use_multiport(portdev)) {
		struct virtio_device *vdev;
		struct port *port;
		u16 rows, cols;

		vdev = portdev->vdev;
		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
		virtio_cread(vdev, struct virtio_console_config, rows, &rows);

		port = find_port_by_id(portdev, 0);
		set_console_size(port, rows, cols);

		/*
		 * We'll use this way of resizing only for legacy
		 * support.  For newer userspace
		 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
		 * to indicate console size changes so that it can be
		 * done per-port.
		 */
		resize_console(port);
	}
}
static int init_vqs(struct ports_device *portdev)
{
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	u32 i, j, nr_ports, nr_queues;
	int err;

	nr_ports = portdev->max_nr_ports;
	nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;

	vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);
	vqs_info = kcalloc(nr_queues, sizeof(*vqs_info), GFP_KERNEL);
	portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
					GFP_KERNEL);
	portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
					 GFP_KERNEL);
	if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) {
		err = -ENOMEM;
		goto free;
	}

	/*
	 * For backward compat (newer host but older guest), the host
	 * spawns a console port first and also inits the vqs for port
	 * 0 before others.
	 */
	j = 0;
	vqs_info[j].callback = in_intr;
	vqs_info[j + 1].callback = out_intr;
	vqs_info[j].name = "input";
	vqs_info[j + 1].name = "output";
	j += 2;

	if (use_multiport(portdev)) {
		vqs_info[j].callback = control_intr;
		vqs_info[j].name = "control-i";
		vqs_info[j + 1].name = "control-o";

		for (i = 1; i < nr_ports; i++) {
			j += 2;
			vqs_info[j].callback = in_intr;
			vqs_info[j + 1].callback = out_intr;
			vqs_info[j].name = "input";
			vqs_info[j + 1].name = "output";
		}
	}
	/* Find the queues. */
	err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, vqs_info, NULL);
	if (err)
		goto free;

	j = 0;
	portdev->in_vqs[0] = vqs[0];
	portdev->out_vqs[0] = vqs[1];
	j += 2;
	if (use_multiport(portdev)) {
		portdev->c_ivq = vqs[j];
		portdev->c_ovq = vqs[j + 1];

		for (i = 1; i < nr_ports; i++) {
			j += 2;
			portdev->in_vqs[i] = vqs[j];
			portdev->out_vqs[i] = vqs[j + 1];
		}
	}
	kfree(vqs_info);
	kfree(vqs);

	return 0;

free:
	kfree(portdev->out_vqs);
	kfree(portdev->in_vqs);
	kfree(vqs_info);
	kfree(vqs);

	return err;
}
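/*
 * Resulting virtqueue layout for a multiport device:
 *   vqs[0]/vqs[1] - port 0 in/out
 *   vqs[2]/vqs[3] - control in/out
 *   vqs[4]/vqs[5] - port 1 in/out, and so on.
 * Without multiport, only the first pair exists.
 */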
static const struct file_operations portdev_fops = {
	.owner = THIS_MODULE,
};
static void remove_vqs(struct ports_device *portdev)
{
	struct virtqueue *vq;

	virtio_device_for_each_vq(portdev->vdev, vq) {
		struct port_buffer *buf;

		flush_bufs(vq, true);
		while ((buf = virtqueue_detach_unused_buf(vq)))
			free_buf(buf, true);
	}
	portdev->vdev->config->del_vqs(portdev->vdev);
	kfree(portdev->in_vqs);
	kfree(portdev->out_vqs);
}
static void virtcons_remove(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	struct port *port, *port2;

	portdev = vdev->priv;

	spin_lock_irq(&pdrvdata_lock);
	list_del(&portdev->list);
	spin_unlock_irq(&pdrvdata_lock);

	/* Device is going away, exit any polling for buffers */
	virtio_break_device(vdev);
	if (use_multiport(portdev))
		flush_work(&portdev->control_work);
	else
		flush_work(&portdev->config_work);

	/* Disable interrupts for vqs */
	virtio_reset_device(vdev);
	/* Finish up work that's lined up */
	if (use_multiport(portdev))
		cancel_work_sync(&portdev->control_work);
	else
		cancel_work_sync(&portdev->config_work);

	list_for_each_entry_safe(port, port2, &portdev->ports, list)
		unplug_port(port);

	unregister_chrdev(portdev->chr_major, "virtio-portsdev");

	/*
	 * When yanking out a device, we immediately lose the
	 * (device-side) queues.  So there's no point in keeping the
	 * guest side around till we drop our final reference.  This
	 * also means that any ports which are in an open state will
	 * have to just stop using the port, as the vqs are going
	 * away.
	 */
	remove_vqs(portdev);
	kfree(portdev);
}
/*
 * Once we're further in boot, we get probed like any other virtio
 * device.
 *
 * If the host also supports multiple console ports, we check the
 * config space to see how many ports the host has spawned.  We
 * initialize each port found.
 */
static int virtcons_probe(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	int err;
	bool multiport;

	/* We only need a config space if features are offered */
	if (!vdev->config->get &&
	    (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)
	     || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
	if (!portdev) {
		err = -ENOMEM;
		goto fail;
	}

	/* Attach this portdev to this virtio_device, and vice-versa. */
	portdev->vdev = vdev;
	vdev->priv = portdev;

	portdev->chr_major = register_chrdev(0, "virtio-portsdev",
					     &portdev_fops);
	if (portdev->chr_major < 0) {
		dev_err(&vdev->dev,
			"Error %d registering chrdev for device %u\n",
			portdev->chr_major, vdev->index);
		err = portdev->chr_major;
		goto free;
	}

	multiport = false;
	portdev->max_nr_ports = 1;

	/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
	if (!is_rproc_serial(vdev) &&
	    virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
				 struct virtio_console_config, max_nr_ports,
				 &portdev->max_nr_ports) == 0) {
		if (portdev->max_nr_ports == 0 ||
		    portdev->max_nr_ports > VIRTCONS_MAX_PORTS) {
			dev_err(&vdev->dev,
				"Invalid max_nr_ports %d",
				portdev->max_nr_ports);
			err = -EINVAL;
			goto free_chrdev;
		}
		multiport = true;
	}

	spin_lock_init(&portdev->ports_lock);
	INIT_LIST_HEAD(&portdev->ports);
	INIT_LIST_HEAD(&portdev->list);

	INIT_WORK(&portdev->config_work, &config_work_handler);
	INIT_WORK(&portdev->control_work, &control_work_handler);

	if (multiport) {
		spin_lock_init(&portdev->c_ivq_lock);
		spin_lock_init(&portdev->c_ovq_lock);
	}

	err = init_vqs(portdev);
	if (err < 0) {
		dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
		goto free_chrdev;
	}

	virtio_device_ready(portdev->vdev);

	if (multiport) {
		err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
		if (err < 0) {
			dev_err(&vdev->dev,
				"Error allocating buffers for control queue\n");
			/*
			 * The host might want to notify mgmt sw about device
			 * add failure.
			 */
			__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
					   VIRTIO_CONSOLE_DEVICE_READY, 0);
			/* Device was functional: we need full cleanup. */
			virtcons_remove(vdev);
			return err;
		}
	} else {
		/*
		 * For backward compatibility: Create a console port
		 * if we're running on older host.
		 */
		add_port(portdev, 0);
	}

	spin_lock_irq(&pdrvdata_lock);
	list_add_tail(&portdev->list, &pdrvdata.portdevs);
	spin_unlock_irq(&pdrvdata_lock);

	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
			   VIRTIO_CONSOLE_DEVICE_READY, 1);

	return 0;

free_chrdev:
	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
	kfree(portdev);
fail:
	return err;
}
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static const unsigned int features[] = {
	VIRTIO_CONSOLE_F_SIZE,
	VIRTIO_CONSOLE_F_MULTIPORT,
};

static const struct virtio_device_id rproc_serial_id_table[] = {
#if IS_ENABLED(CONFIG_REMOTEPROC)
	{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
#endif
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);

static const unsigned int rproc_serial_features[] = {
};
#ifdef CONFIG_PM_SLEEP
static int virtcons_freeze(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	struct port *port;

	portdev = vdev->priv;

	virtio_reset_device(vdev);

	if (use_multiport(portdev))
		virtqueue_disable_cb(portdev->c_ivq);
	cancel_work_sync(&portdev->control_work);
	cancel_work_sync(&portdev->config_work);
	/*
	 * Once more: if control_work_handler() was running, it would
	 * enable the cb as the last step.
	 */
	if (use_multiport(portdev))
		virtqueue_disable_cb(portdev->c_ivq);

	list_for_each_entry(port, &portdev->ports, list) {
		virtqueue_disable_cb(port->in_vq);
		virtqueue_disable_cb(port->out_vq);
		/*
		 * We'll ask the host later if the new invocation has
		 * the port opened or closed.
		 */
		port->host_connected = false;
		remove_port_data(port);
	}
	remove_vqs(portdev);

	return 0;
}
static int virtcons_restore(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	struct port *port;
	int ret;

	portdev = vdev->priv;

	ret = init_vqs(portdev);
	if (ret)
		return ret;

	virtio_device_ready(portdev->vdev);

	if (use_multiport(portdev))
		fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);

	list_for_each_entry(port, &portdev->ports, list) {
		port->in_vq = portdev->in_vqs[port->id];
		port->out_vq = portdev->out_vqs[port->id];

		fill_queue(port->in_vq, &port->inbuf_lock);

		/* Get port open/close status on the host */
		send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);

		/*
		 * If a port was open at the time of suspending, we
		 * have to let the host know that it's still open.
		 */
		if (port->guest_connected)
			send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
	}
	return 0;
}
#endif
static struct virtio_driver virtio_console = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.id_table =	id_table,
	.probe =	virtcons_probe,
	.remove =	virtcons_remove,
	.config_changed = config_intr,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtcons_freeze,
	.restore =	virtcons_restore,
#endif
};

static struct virtio_driver virtio_rproc_serial = {
	.feature_table = rproc_serial_features,
	.feature_table_size = ARRAY_SIZE(rproc_serial_features),
	.driver.name =	"virtio_rproc_serial",
	.id_table =	rproc_serial_id_table,
	.probe =	virtcons_probe,
	.remove =	virtcons_remove,
};
static int __init virtio_console_init(void)
{
	int err;

	err = class_register(&port_class);
	if (err)
		return err;

	pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
	INIT_LIST_HEAD(&pdrvdata.consoles);
	INIT_LIST_HEAD(&pdrvdata.portdevs);

	err = register_virtio_driver(&virtio_console);
	if (err < 0) {
		pr_err("Error %d registering virtio driver\n", err);
		goto free;
	}
	err = register_virtio_driver(&virtio_rproc_serial);
	if (err < 0) {
		pr_err("Error %d registering virtio rproc serial driver\n",
		       err);
		goto unregister;
	}
	return 0;
unregister:
	unregister_virtio_driver(&virtio_console);
free:
	debugfs_remove_recursive(pdrvdata.debugfs_dir);
	class_unregister(&port_class);
	return err;
}
static void __exit virtio_console_fini(void)
{
	reclaim_dma_bufs();

	unregister_virtio_driver(&virtio_console);
	unregister_virtio_driver(&virtio_rproc_serial);

	class_unregister(&port_class);
	debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
module_init(virtio_console_init);
module_exit(virtio_console_fini);

MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");