/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

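/* Per-device state; one instance per open of /dev/vhost-vsock.  The guest
 * CID is unassigned until userspace sets it with VHOST_VSOCK_SET_GUEST_CID.
 */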
struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

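/* The well-known host CID as seen by guests; matches VMADDR_CID_HOST. */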
static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

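/* Caller must hold vhost_vsock_lock. */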
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}

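/* Transfer packets queued on vsock->send_pkt_list into the guest's RX
 * virtqueue.  Runs in vhost worker context and takes vq->mutex itself;
 * if no descriptor is available the packet is requeued and we stop early.
 */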
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

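/* Deferred-work wrapper: send_pkt_work is queued whenever new packets are
 * added to send_pkt_list, and drains them into the RX virtqueue.
 */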
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

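/* Entry point from the vsock core for host->guest transmission.  Queues the
 * packet and kicks the worker; returns the packet length, or -ENODEV if no
 * vhost_vsock instance owns the destination CID.
 */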
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

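/* Drop not-yet-transmitted packets that belong to a socket being torn down.
 * Reply packets removed here are subtracted from queued_replies, which may
 * unblock TX processing (see vhost_vsock_more_replies()).
 */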
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}

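/* Construct a virtio_vsock_pkt from a guest TX descriptor chain.  The TX
 * virtqueue carries only guest-written ("out") buffers, so any
 * device-writable ("in") buffers indicate a malformed guest.
 */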
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

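/* Reply packets are counted in queued_replies while they sit on
 * send_pkt_list.  If the guest stops servicing its RX virtqueue, the count
 * reaches the TX ring size and we stop reading new TX packets, bounding the
 * memory the host queues on behalf of a misbehaving guest.
 */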
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

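/* TX virtqueue kick handler: receive packets from the guest and feed them
 * to the vsock core.
 */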
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

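/* VHOST_VSOCK_SET_RUNNING(1): publish the device into each virtqueue's
 * private_data so the kick handlers start processing.  On failure, all
 * virtqueues are rolled back to the stopped state.
 */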
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

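/* Called for every connected socket when a device is released: sockets whose
 * peer CID no longer resolves to a vhost_vsock instance are marked done and
 * get an ECONNRESET.
 */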
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

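/* Register with the vsock core before exposing the misc device, so the
 * transport is ready by the time userspace can open /dev/vhost-vsock.
 */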
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");