1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Virtio vhost-user driver
5 * Copyright(c) 2019 Intel Corporation
7 * This driver allows virtio devices to be used over a vhost-user socket.
9 * Guest devices can be instantiated by kernel module or command line
10 * parameters. One device will be created for each parameter. Syntax:
12 * virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
14 * <socket> := vhost-user socket path to connect
15 * <virtio_id> := virtio device id (as in virtio_ids.h)
16 * <platform_id> := (optional) platform device id
19 * virtio_uml.device=/var/uml.socket:1
21 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
26 #include <linux/virtio.h>
27 #include <linux/virtio_config.h>
28 #include <linux/virtio_ring.h>
29 #include <shared/as-layout.h>
33 #include "vhost_user.h"
35 /* Workaround due to a conflict between irq_user.h and irqreturn.h */
40 #define MAX_SUPPORTED_QUEUE_SIZE 256
42 #define to_virtio_uml_device(_vdev) \
43 container_of(_vdev, struct virtio_uml_device, vdev)
/* Per-device platform data: the vhost-user socket to connect to, plus the
 * plumbing used to tear the device down when the connection breaks.
 * NOTE(review): source appears truncated — a virtio_device_id field and the
 * closing brace (both referenced elsewhere in this file) are not visible. */
struct virtio_uml_platform_data {
	const char *socket_path;		/* vhost-user socket path */
	struct work_struct conn_broken_wk;	/* scheduled on -ECONNRESET */
	struct platform_device *pdev;		/* back-pointer for teardown */
/* Driver state for one vhost-user-backed virtio device.
 * NOTE(review): truncated — fields used elsewhere (sock, req_fd, features,
 * status, registered) and the closing brace are not visible here. */
struct virtio_uml_device {
	struct virtio_device vdev;	/* embedded virtio core device */
	struct platform_device *pdev;	/* owning platform device */
	u64 protocol_features;		/* negotiated VHOST_USER_PROTOCOL_F_* bits */
/* Per-virtqueue bookkeeping stored in vq->priv.
 * NOTE(review): interior truncated — kick_fd/call_fd/name fields used
 * throughout this file are not visible here. */
struct virtio_uml_vq_info {
/* Physical-memory layout exported by the UML arch code; used when
 * advertising memory regions to the vhost-user slave. */
extern unsigned long long physmem_size, highmem;

/* Device-prefixed error logging helper. */
#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, __VA_ARGS__)
72 /* Vhost-user protocol */
/* Send an entire buffer (optionally with attached fds) over the vhost-user
 * socket, retrying the syscall on -EINTR until all bytes are written.
 * NOTE(review): truncated — loop setup and return path are not visible. */
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
	} while (len && (rc >= 0 || rc == -EINTR));
/* Read exactly len bytes, retrying on -EINTR; when !abortable, also spin on
 * -EAGAIN (the fd is non-blocking but the caller must have the data).
 * NOTE(review): truncated — loop setup and return path are not visible. */
static int full_read(int fd, void *buf, int len, bool abortable)
		rc = os_read_file(fd, buf, len);
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
/* Read just the fixed-size vhost-user message header (abortable read). */
static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
	return full_read(fd, msg, sizeof(msg->header), true);
/* Receive a full vhost-user message: header first, then a payload of
 * header.size bytes (bounded by max_payload_size). A -ECONNRESET on a
 * registered device marks the virtio device broken and schedules the
 * connection-broken teardown work.
 * NOTE(review): truncated — error returns and size-check body missing. */
static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size)
	int rc = vhost_user_recv_header(fd, msg);

	if (rc == -ECONNRESET && vu_dev->registered) {
		struct virtio_uml_platform_data *pdata;

		pdata = vu_dev->pdev->dev.platform_data;

		/* stop all traffic, then tear the device down from a workqueue */
		virtio_break_device(&vu_dev->vdev);
		schedule_work(&pdata->conn_broken_wk);
	size = msg->header.size;
	if (size > max_payload_size)
	return full_read(fd, &msg->payload, size, false);
/* Receive a reply on the main socket and validate that it carries exactly
 * the REPLY flag plus the protocol version.
 * NOTE(review): truncated — error returns are not visible. */
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg, max_payload_size);

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
/* Receive a reply whose payload is a single u64 and store it in *value.
 * NOTE(review): truncated — the u64 *value parameter and error returns
 * are not visible in this extraction. */
static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (msg.header.size != sizeof(msg.payload.integer))
	*value = msg.payload.integer;
/* Receive a slave-initiated request on the dedicated request fd; the only
 * flag the slave may set besides the version is NEED_REPLY.
 * NOTE(review): truncated — the comparison RHS and returns are missing. */
static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg, max_payload_size);

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
/* Send a message (header + header.size payload bytes, optional fds) to the
 * slave. Unless the caller already expects a response, request a REPLY_ACK
 * when the protocol supports it, and treat a non-zero ack status as error.
 * NOTE(review): truncated — locks/locals, the request_ack=false branch and
 * the conditional guarding the NEED_REPLY flag are not visible. */
static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
	size_t size = sizeof(msg->header) + msg->header.size;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
		rc = vhost_user_recv_u64(vu_dev, &status);
			vu_err(vu_dev, "slave reports error: %llu\n", status);
/* Send a header-only message (no payload, no fds). */
static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
	struct vhost_user_msg msg = {
		.header.request = request,

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
/* Send a header-only message carrying exactly one file descriptor.
 * NOTE(review): truncated — the u32 request / int fd parameters are not
 * visible in this extraction. */
static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
	struct vhost_user_msg msg = {
		.header.request = request,

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
/* Send a message whose payload is a single u64 value. */
static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
/* Claim ownership of the vhost-user session (first message sent). */
static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
/* Query the slave's virtio feature bits into *features.
 * NOTE(review): truncated — the u64 *features parameter and the error
 * check on rc are not visible. */
static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	return vhost_user_recv_u64(vu_dev, features);
/* Program the negotiated virtio feature bits into the slave.
 * NOTE(review): truncated — the u64 features parameter is not visible. */
static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
/* Query VHOST_USER_PROTOCOL_F_* bits supported by the slave.
 * NOTE(review): truncated — the error check on rc is not visible. */
static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_PROTOCOL_FEATURES);

	return vhost_user_recv_u64(vu_dev, protocol_features);
/* Program the chosen protocol-feature bits into the slave.
 * NOTE(review): truncated — the second argument continuation is missing. */
static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
/* Answer a slave request on the request fd: echo the request's header,
 * swap NEED_REPLY for REPLY, and carry the integer response code.
 * NOTE(review): truncated — the rc declaration and the error-path vu_err
 * wrapper around the quoted format string are not visible. */
static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
	struct vhost_user_msg reply = {
		.payload.integer = response,
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
		       "sending reply to slave request failed: %d (size %zu)\n",
/* IRQ handler for slave-initiated requests on the request-fd pipe: read one
 * message (with room for an oversized payload), dispatch by request type,
 * and send a reply if the slave asked for one.
 * NOTE(review): truncated — msg appears to be an anonymous wrapper struct
 * holding .msg plus .extra_payload (the bare declaration is cut), and the
 * response/rc locals, break statements and return are not visible. */
static irqreturn_t vu_req_interrupt(int irq, void *data)
	struct virtio_uml_device *vu_dev = data;
		struct vhost_user_msg msg;
		u8 extra_payload[512];

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		virtio_config_changed(&vu_dev->vdev);
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_IOMMU_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);
/* Set up the slave-request channel: create a pipe, hook the read end to the
 * VIRTIO_IRQ handler, and hand the write end to the slave via
 * VHOST_USER_SET_SLAVE_REQ_FD. Error paths unwind the irq and read end.
 * NOTE(review): truncated — req_fds declaration, error checks/gotos and the
 * final return are not visible. */
static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq(VIRTIO_IRQ, vu_dev->req_fd, IRQ_READ,
			    vu_req_interrupt, IRQF_SHARED,
			    vu_dev->pdev->name, vu_dev);
	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
	um_free_irq(VIRTIO_IRQ, vu_dev);
	os_close_file(req_fds[0]);
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
/* Full vhost-user handshake: set owner, fetch features, then (if the slave
 * offers VHOST_USER_F_PROTOCOL_FEATURES) negotiate the protocol-feature
 * subset we support, and open the slave-request channel when available.
 * NOTE(review): truncated — error checks between steps and the return are
 * not visible. */
static int vhost_user_init(struct virtio_uml_device *vu_dev)
	int rc = vhost_user_set_owner(vu_dev);
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
						      &vu_dev->protocol_features);
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
						      vu_dev->protocol_features);
	if (vu_dev->protocol_features &
	    BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
/* Read len bytes of device config at offset via VHOST_USER_GET_CONFIG.
 * Requests config from 0 up to offset+len, validates the reply sizes, then
 * copies the requested slice into buf. No-op unless the slave advertises
 * VHOST_USER_PROTOCOL_F_CONFIG.
 * NOTE(review): truncated — allocation check, rc declaration, goto/free
 * cleanup path and the vu_err wrappers around the quoted strings are
 * not visible. */
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))

	msg = kzalloc(msg_size, GFP_KERNEL);
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
	memcpy(buf, msg->payload.config.payload + offset, len);
/* Write len bytes of device config at offset via VHOST_USER_SET_CONFIG.
 * No-op unless the slave advertises VHOST_USER_PROTOCOL_F_CONFIG.
 * NOTE(review): truncated — allocation check, rc declaration, kfree and
 * the vu_err wrapper around the quoted string are not visible. */
static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))

	msg = kzalloc(msg_size, GFP_KERNEL);
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
/* Fill one vhost-user memory-region descriptor for [addr, addr+size): the
 * guest and user addresses are identical (UML), the mmap offset comes from
 * phys_mapping(), and a second lookup on the last byte verifies the whole
 * region lives in the same backing fd (*fd_out).
 * NOTE(review): truncated — *fd_out assignment and the returns are not
 * visible. */
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
/* Advertise our physical memory (and highmem, if any) to the slave via
 * VHOST_USER_SET_MEM_TABLE so it can map virtqueue buffers directly.
 * NOTE(review): truncated — the fds array, error checks and the highmem
 * conditional are not visible. */
static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	unsigned long reserved = uml_reserved - uml_physmem;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&msg.payload.mem_regions.regions[0]);
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
						&fds[1], &msg.payload.mem_regions.regions[1]);

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
/* Send a vring-state message ({index, num} payload) for the given request. */
static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
/* Program the queue size for a vring.
 * NOTE(review): truncated — index/num parameters and trailing arguments
 * are not visible. */
static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
/* Program the starting descriptor index for a vring.
 * NOTE(review): truncated — trailing arguments are not visible. */
static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
/* Hand the slave the desc/used/avail (and log) ring addresses for a vring.
 * NOTE(review): truncated — the u64 log parameter and initializer closing
 * brace are not visible. */
static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
/* Attach a kick/call eventfd to a vring. A negative fd means "no fd":
 * set the POLL bit in the index payload and send no descriptor.
 * NOTE(review): truncated — the fd < 0 conditional is not visible. */
static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
/* Attach the call (device-to-driver notification) fd to a vring.
 * NOTE(review): truncated — index/fd parameters and trailing arguments
 * are not visible. */
static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
/* Attach the kick (driver-to-device notification) fd to a vring.
 * NOTE(review): truncated — index/fd parameters and trailing arguments
 * are not visible. */
static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
/* Enable/disable a vring — only meaningful when the slave negotiated
 * VHOST_USER_F_PROTOCOL_FEATURES; otherwise silently succeed.
 * NOTE(review): truncated — the early return and trailing args missing. */
static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
679 /* Virtio interface */
/* virtqueue notify callback: kick the slave by writing an 8-byte counter
 * value to the vring's kick eventfd, retrying on -EINTR. */
static bool vu_notify(struct virtqueue *vq)
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;

		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
/* IRQ handler for a vring's call fd: drain all pending 8-byte events and
 * forward each to vring_interrupt(); the fd is non-blocking, so -EAGAIN
 * marks normal end of the drain loop.
 * NOTE(review): truncated — the n counter declaration and return are not
 * visible. */
static irqreturn_t vu_interrupt(int irq, void *opaque)
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	irqreturn_t ret = IRQ_NONE;

		rc = os_read_file(info->call_fd, &n, sizeof(n));
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
/* virtio_config_ops.get: read config space through the vhost-user slave. */
static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
/* virtio_config_ops.set: write config space through the vhost-user slave. */
static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
/* virtio_config_ops.get_status: status is tracked locally, not queried. */
static u8 vu_get_status(struct virtio_device *vdev)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
/* virtio_config_ops.set_status: record the new status locally. */
static void vu_set_status(struct virtio_device *vdev, u8 status)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
/* virtio_config_ops.reset.
 * NOTE(review): truncated — the body (presumably clearing the cached
 * status) is not visible. */
static void vu_reset(struct virtio_device *vdev)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
/* Tear down one virtqueue: release its IRQ, close both eventfds, delete
 * the vring. NOTE(review): truncated — kfree(info) is not visible. */
static void vu_del_vq(struct virtqueue *vq)
	struct virtio_uml_vq_info *info = vq->priv;

	um_free_irq(VIRTIO_IRQ, vq);

	os_close_file(info->call_fd);
	os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
/* virtio_config_ops.del_vqs: disable all rings (reverse order, working
 * around a snabb decoding bug), synchronize by issuing a round-trip
 * GET_FEATURES, then delete each queue.
 * NOTE(review): truncated — the features local and the vu_del_vq call in
 * the final loop are not visible. */
static void vu_del_vqs(struct virtio_device *vdev)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
/* Wire up the call path for one vring: create a pipe, hook its read end to
 * vu_interrupt via VIRTIO_IRQ, pass the write end to the slave; error paths
 * unwind the irq and read end, and the unused write end is always closed.
 * NOTE(review): truncated — call_fds declaration, error checks/gotos and
 * the return are not visible. */
static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
	struct virtio_uml_vq_info *info = vq->priv;

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	info->call_fd = call_fds[0];
	rc = um_request_irq(VIRTIO_IRQ, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	um_free_irq(VIRTIO_IRQ, vq);
	os_close_file(call_fds[0]);
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);
/* Create and configure one virtqueue: allocate per-queue info, create the
 * vring (weak barriers, may-reduce-num), allocate the kick eventfd, wire up
 * the call fd, then program num/base/addresses into the slave. Error labels
 * unwind in reverse order.
 * NOTE(review): truncated — rc declaration, allocation checks, kick_fd
 * assignment, the log argument to set_vring_addr, kick fd programming,
 * vq->priv assignment, error gotos and returns are not visible. */
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	num = virtqueue_get_vring_size(vq);

	rc = os_eventfd(0, 0);
	rc = vu_setup_vq_call_fd(vu_dev, vq);
	rc = vhost_user_set_vring_num(vu_dev, index, num);
	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
	um_free_irq(VIRTIO_IRQ, vq);
	os_close_file(info->call_fd);
	os_close_file(info->kick_fd);
	vring_del_virtqueue(vq);
/* virtio_config_ops.find_vqs: advertise the memory table once, create each
 * requested queue, then attach kick fds and enable every ring.
 * NOTE(review): truncated — error handling/gotos, the kick-fd argument and
 * the return are not visible. */
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	rc = vhost_user_set_mem_table(vu_dev);
	for (i = 0; i < nvqs; ++i) {
		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		rc = vhost_user_set_vring_kick(vu_dev, vq->index,
		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
/* virtio_config_ops.get_features: return the bits cached at init time. */
static u64 vu_get_features(struct virtio_device *vdev)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
/* virtio_config_ops.finalize_features: keep only the transport bits the
 * vring core accepts plus the vhost-user-supported subset, then program
 * the result into the slave. */
static int vu_finalize_features(struct virtio_device *vdev)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
/* virtio_config_ops.bus_name: use the platform device name. */
static const char *vu_bus_name(struct virtio_device *vdev)
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
/* virtio transport ops for this driver.
 * NOTE(review): truncated — .get/.set/.reset entries and the closing brace
 * are not visible, though the handlers exist above. */
static const struct virtio_config_ops virtio_uml_config_ops = {
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
/* Device release callback: close the slave-request channel (if it was ever
 * opened) and the main vhost-user socket. */
static void virtio_uml_release_dev(struct device *d)
	struct virtio_device *vdev =
		container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(VIRTIO_IRQ, vu_dev);
		os_close_file(vu_dev->req_fd);

	os_close_file(vu_dev->sock);
981 /* Platform device */
/* Platform probe: allocate the device, connect the vhost-user socket
 * (retrying on -EINTR), run the handshake, and register the virtio device.
 * On registration failure, put_device() runs the release callback.
 * NOTE(review): truncated — rc declaration, pdata NULL check, error checks
 * between steps, sock assignment and the error-path labels/returns are not
 * visible. */
static int virtio_uml_probe(struct platform_device *pdev)
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;

	vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);

	rc = vhost_user_init(vu_dev);

	platform_set_drvdata(pdev, vu_dev);

	rc = register_virtio_device(&vu_dev->vdev);
		put_device(&vu_dev->vdev.dev);
	vu_dev->registered = 1;
	os_close_file(vu_dev->sock);
/* Platform remove: unregister the virtio device (release cb does cleanup). */
static int virtio_uml_remove(struct platform_device *pdev)
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
1036 /* Command line device list */
/* Release callback for the command-line parent device.
 * NOTE(review): body not visible — presumably intentionally empty. */
static void vu_cmdline_release_dev(struct device *d)
/* Synthetic parent for all devices created via the module parameter. */
static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
/* Lazily-registered parent flag and next platform-device id to hand out. */
static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;
/* device_for_each_child callback: free the duplicated socket path and
 * unregister one command-line-created platform device. */
static int vu_unregister_cmdline_device(struct device *dev, void *data)
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
/* Work handler scheduled when the vhost-user connection dies: unregister
 * the affected command-line device from process context. */
static void vu_conn_broken(struct work_struct *wk)
	struct virtio_uml_platform_data *pdata;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
/* kernel_param set handler for virtio_uml.device=<socket>:<id>[:<pdev_id>]:
 * parse the string, lazily register the parent device, duplicate the socket
 * path, and create a platform device carrying the parsed platform data.
 * NOTE(review): truncated — socket_path declaration, error returns/checks
 * and the sizeof(pdata) argument to platform_device_register_data are not
 * visible. */
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
		vu_cmdline_parent_registered = true;

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
	err = PTR_ERR_OR_ZERO(pdev);

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);
/* device_for_each_child callback: append one "<socket>:<id>:<pdev_id>" line
 * describing a command-line device to the parameter read buffer. */
static int vu_cmdline_get_device(struct device *dev, void *data)
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
/* kernel_param get handler: list every registered command-line device. */
static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
/* Module-parameter plumbing for virtio_uml.device=...
 * NOTE(review): truncated — the final __uml_help string line is missing. */
static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
/* Tear down every command-line-created device and the parent itself. */
static void vu_unregister_cmdline_devices(void)
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
1170 /* Platform driver */
/* Device-tree match table (terminator entry not visible in extraction). */
static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
MODULE_DEVICE_TABLE(of, virtio_uml_match);
= {
1179 .probe
= virtio_uml_probe
,
1180 .remove
= virtio_uml_remove
,
1182 .name
= "virtio-uml",
1183 .of_match_table
= virtio_uml_match
,
/* Module init: register the platform driver. */
static int __init virtio_uml_init(void)
	return platform_driver_register(&virtio_uml_driver);
/* Module exit: unregister the driver, then drop command-line devices. */
static void __exit virtio_uml_exit(void)
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
/* Registration hooks — __uml_exitcall also runs the exit path on UML
 * shutdown when built in. */
module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");