/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);
struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];

        /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
        struct list_head list;

        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
        struct list_head send_pkt_list;	/* host->guest pending packets */

        atomic_t queued_replies;

        u32 guest_cid;
};
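/* queued_replies throttles the guest: reply packets generated while draining
 * the guest's TX virtqueue sit on send_pkt_list and are counted here.  Once
 * the count reaches the TX virtqueue size, TX processing stops until enough
 * replies have been delivered, so a guest cannot pin unbounded host memory
 * with requests that each demand a response.
 */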
static u32 vhost_transport_get_local_cid(void)
{
        return VHOST_VSOCK_DEFAULT_HOST_CID;
}
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        spin_lock_bh(&vhost_vsock_lock);
        list_for_each_entry(vsock, &vhost_vsock_list, list) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID yet */
                if (other_cid == 0)
                        continue;

                if (other_cid == guest_cid) {
                        spin_unlock_bh(&vhost_vsock_lock);
                        return vsock;
                }
        }
        spin_unlock_bh(&vhost_vsock_lock);

        return NULL;
}
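/* Deliver packets queued on send_pkt_list to the guest: pop each packet, grab
 * an RX virtqueue descriptor, and copy the virtio_vsock header and payload
 * into the guest buffers.  A head equal to vq->num means the guest has no
 * buffers available, so the packet is requeued and notifications re-armed.
 */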
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
{
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
        bool added = false;
        bool restart_tx = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);

        for (;;) {
                struct virtio_vsock_pkt *pkt;
                struct iov_iter iov_iter;
                unsigned out, in;
                size_t nbytes;
                size_t len;
                int head;

                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                spin_unlock_bh(&vsock->send_pkt_list_lock);

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                if (head == vq->num) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);

                        /* We cannot finish yet if more buffers snuck in while
                         * re-enabling notify.
                         */
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                if (out) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Expected 0 output buffers, got %u\n", out);
                        break;
                }

                len = iov_length(&vq->iov[out], in);
                iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

                nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
                if (nbytes != sizeof(pkt->hdr)) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt hdr\n");
                        break;
                }

                nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
                if (nbytes != pkt->len) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt buf\n");
                        break;
                }

                vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
                added = true;

                if (pkt->reply) {
                        int val;

                        val = atomic_dec_return(&vsock->queued_replies);

                        /* Do we have resources to resume tx processing? */
                        if (val + 1 == tx_vq->num)
                                restart_tx = true;
                }

                virtio_transport_free_pkt(pkt);
        }
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);

        if (restart_tx)
                vhost_poll_queue(&tx_vq->poll);
}
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_vsock *vsock;

        vsock = container_of(work, struct vhost_vsock, send_pkt_work);
        vq = &vsock->vqs[VSOCK_VQ_RX];

        vhost_transport_do_send_pkt(vsock, vq);
}
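/* Entry point used by the core transport to transmit a packet to the guest.
 * The packet is appended to send_pkt_list and actual delivery is deferred to
 * send_pkt_work, so the caller never blocks on virtqueue processing.
 */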
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
        struct vhost_vsock *vsock;
        struct vhost_virtqueue *vq;
        int len = pkt->len;

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }

        vq = &vsock->vqs[VSOCK_VQ_RX];

        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
        return len;
}
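/* Remove any packets queued for vsk that have not yet been delivered to the
 * guest.  Freed replies are subtracted from queued_replies; if that brings
 * the count back below the TX virtqueue size, TX processing is rescheduled.
 */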
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
        LIST_HEAD(freeme);

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
                return -ENODEV;

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
                if (pkt->vsk != vsk)
                        continue;
                list_move(&pkt->list, &freeme);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        list_for_each_entry_safe(pkt, n, &freeme, list) {
                if (pkt->reply)
                        cnt++;
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }

        if (cnt) {
                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                        vhost_poll_queue(&tx_vq->poll);
        }

        return 0;
}
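/* Build a virtio_vsock_pkt from a guest TX descriptor chain.  The chain must
 * be guest-to-host only (no "in" buffers) and carries the packet header
 * followed by the optional payload.
 */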
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
{
        struct virtio_vsock_pkt *pkt;
        struct iov_iter iov_iter;
        size_t nbytes;
        size_t len;

        if (in != 0) {
                vq_err(vq, "Expected 0 input buffers, got %u\n", in);
                return NULL;
        }

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return NULL;

        len = iov_length(vq->iov, out);
        iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

        nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
        if (nbytes != sizeof(pkt->hdr)) {
                vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
                       sizeof(pkt->hdr), nbytes);
                kfree(pkt);
                return NULL;
        }

        if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
                pkt->len = le32_to_cpu(pkt->hdr.len);

        /* No payload */
        if (!pkt->len)
                return pkt;

        /* The pkt is too big */
        if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
                kfree(pkt);
                return NULL;
        }

        pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
        if (!pkt->buf) {
                kfree(pkt);
                return NULL;
        }

        nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
        if (nbytes != pkt->len) {
                vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
                       pkt->len, nbytes);
                virtio_transport_free_pkt(pkt);
                return NULL;
        }

        return pkt;
}
/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}
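/* Guest kicked the TX virtqueue: drain it, turning each descriptor chain into
 * a packet.  Packets whose source CID matches this instance's guest CID are
 * handed to the common virtio transport; misaddressed ones are dropped.
 */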
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        struct virtio_vsock_pkt *pkt;
        int head;
        unsigned int out, in;
        bool added = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        vhost_disable_notify(&vsock->dev, vq);
        for (;;) {
                u32 len;

                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
                         * callbacks disabled.
                         */
                        goto no_more_replies;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;

                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                pkt = vhost_vsock_alloc_pkt(vq, out, in);
                if (!pkt) {
                        vq_err(vq, "Faulted on pkt\n");
                        continue;
                }

                len = pkt->len;

                /* Only accept correctly addressed packets */
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
                        virtio_transport_recv_pkt(pkt);
                else
                        virtio_transport_free_pkt(pkt);

                vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
                added = true;
        }

no_more_replies:
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);
}
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);

        vhost_transport_do_send_pkt(vsock, vq);
}
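/* vq->private_data doubles as the "device running" flag: start points it at
 * the vhost_vsock instance after validating virtqueue access, and stop (or a
 * start failure) clears it so the kick handlers above bail out early.
 */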
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
                        goto err_vq;
                }

                if (!vq->private_data) {
                        vq->private_data = vsock;
                        ret = vhost_vq_init_access(vq);
                        if (ret)
                                goto err_vq;
                }

                mutex_unlock(&vq->mutex);
        }

        mutex_unlock(&vsock->dev.mutex);
        return 0;

err_vq:
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }
err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }

err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}
static void vhost_vsock_free(struct vhost_vsock *vsock)
{
        kvfree(vsock);
}
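/* One vhost_vsock instance is created per open of /dev/vhost-vsock.  It only
 * becomes reachable by CID lookup once userspace assigns a CID via
 * VHOST_VSOCK_SET_GUEST_CID.
 */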
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;
        int ret;

        /* This struct is large and allocation could fail, fall back to vmalloc
         * if there is no other way.
         */
        vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!vsock) {
                vsock = vmalloc(sizeof(*vsock));
                if (!vsock)
                        return -ENOMEM;
        }

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                ret = -ENOMEM;
                goto out;
        }

        /* vmalloc() does not zero the struct, so clear the CID explicitly;
         * vhost_vsock_get() treats 0 as "no CID assigned yet".
         */
        vsock->guest_cid = 0;

        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

        spin_lock_bh(&vhost_vsock_lock);
        list_add_tail(&vsock->list, &vhost_vsock_list);
        spin_unlock_bh(&vhost_vsock_lock);
        return 0;

out:
        vhost_vsock_free(vsock);
        return ret;
}
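/* Wait until every virtqueue kick handler and the send_pkt worker queued so
 * far have finished executing.
 */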
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
                if (vsock->vqs[i].handle_kick)
                        vhost_poll_flush(&vsock->vqs[i].poll);
        vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}
static void vhost_vsock_reset_orphans(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* vmci_transport.c doesn't take sk_lock here either.  At least we're
         * under vsock_table_lock so the sock cannot disappear while we're
         * executing.
         */

        if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
                sock_set_flag(sk, SOCK_DONE);
                vsk->peer_shutdown = SHUTDOWN_MASK;
                sk->sk_state = SS_UNCONNECTED;
                sk->sk_err = ECONNRESET;
                sk->sk_error_report(sk);
        }
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
        struct vhost_vsock *vsock = file->private_data;

        spin_lock_bh(&vhost_vsock_lock);
        list_del(&vsock->list);
        spin_unlock_bh(&vhost_vsock_lock);

        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

        vhost_vsock_stop(vsock);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        while (!list_empty(&vsock->send_pkt_list)) {
                struct virtio_vsock_pkt *pkt;

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_dev_cleanup(&vsock->dev, false);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
}
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs */
        if (guest_cid <= VMADDR_CID_HOST ||
            guest_cid == U32_MAX)
                return -EINVAL;

        /* 64-bit CIDs are not yet supported */
        if (guest_cid > U32_MAX)
                return -EINVAL;

        /* Refuse if CID is already in use */
        other = vhost_vsock_get(guest_cid);
        if (other && other != vsock)
                return -EADDRINUSE;

        spin_lock_bh(&vhost_vsock_lock);
        vsock->guest_cid = guest_cid;
        spin_unlock_bh(&vhost_vsock_lock);

        return 0;
}
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
                mutex_unlock(&vsock->dev.mutex);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid;
        u64 features;
        int start;
        int r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                else
                        return vhost_vsock_stop(vsock);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}
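/* Roughly, userspace (e.g. a VMM such as QEMU) is expected to drive this
 * device as follows; the generic vring setup ioctls are dispatched to
 * vhost_dev_ioctl()/vhost_vring_ioctl() by the default case above:
 *
 *   fd = open("/dev/vhost-vsock", O_RDWR);
 *   ioctl(fd, VHOST_SET_OWNER, NULL);            // bind dev to this process
 *   ioctl(fd, VHOST_SET_MEM_TABLE, &mem);        // describe guest memory
 *   ... VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL per virtqueue ...
 *   ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);  // u64, > VMADDR_CID_HOST
 *   ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);  // int: 1 = start, 0 = stop
 */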
static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
        .release        = vhost_vsock_dev_release,
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
};
static struct miscdevice vhost_vsock_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "vhost-vsock",
        .fops = &vhost_vsock_fops,
};
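/* Most socket operations are serviced by the transport-independent
 * virtio_transport_* helpers; only CID lookup, packet transmission, and
 * packet cancellation need vhost-specific implementations.
 */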
static struct virtio_transport vhost_transport = {
        .transport = {
                .get_local_cid            = vhost_transport_get_local_cid,

                .init                     = virtio_transport_do_socket_init,
                .destruct                 = virtio_transport_destruct,
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
                .cancel_pkt               = vhost_transport_cancel_pkt,

                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_allow              = virtio_transport_dgram_allow,

                .stream_enqueue           = virtio_transport_stream_enqueue,
                .stream_dequeue           = virtio_transport_stream_dequeue,
                .stream_has_data          = virtio_transport_stream_has_data,
                .stream_has_space         = virtio_transport_stream_has_space,
                .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
                .stream_is_active         = virtio_transport_stream_is_active,
                .stream_allow             = virtio_transport_stream_allow,

                .notify_poll_in           = virtio_transport_notify_poll_in,
                .notify_poll_out          = virtio_transport_notify_poll_out,
                .notify_recv_init         = virtio_transport_notify_recv_init,
                .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init         = virtio_transport_notify_send_init,
                .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

                .set_buffer_size          = virtio_transport_set_buffer_size,
                .set_min_buffer_size      = virtio_transport_set_min_buffer_size,
                .set_max_buffer_size      = virtio_transport_set_max_buffer_size,
                .get_buffer_size          = virtio_transport_get_buffer_size,
                .get_min_buffer_size      = virtio_transport_get_min_buffer_size,
                .get_max_buffer_size      = virtio_transport_get_max_buffer_size,
        },

        .send_pkt = vhost_transport_send_pkt,
};
static int __init vhost_vsock_init(void)
{
        int ret;

        ret = vsock_core_init(&vhost_transport.transport);
        if (ret < 0)
                return ret;
        return misc_register(&vhost_vsock_misc);
}
static void __exit vhost_vsock_exit(void)
{
        misc_deregister(&vhost_vsock_misc);
        vsock_core_exit();
}
module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");