// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

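/* Fill the guest's RX virtqueue with packets queued on send_pkt_list.  Runs
 * from the send_pkt worker and from the RX kick handler.
 */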
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr))
			payload_len = iov_len - sizeof(pkt->hdr);

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			/* We are queueing the same virtio_vsock_pkt to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

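/* virtio_transport callback: queue a host->guest packet on send_pkt_list and
 * kick the send_pkt worker to deliver it.
 */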
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

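/* virtio_transport callback: drop any packets still queued for @vsk (e.g.
 * when a pending connect is interrupted) and resume TX processing if
 * dropping replies freed up space in the TX virtqueue.
 */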
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

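/* Read one guest TX descriptor chain into a freshly allocated
 * virtio_vsock_pkt.  Returns NULL on malformed descriptors or allocation
 * failure.
 */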
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf_len = pkt->len;

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

static struct virtio_transport vhost_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

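/* Guest kicked the TX virtqueue: pull packets sent by the guest and hand
 * correctly addressed ones to the core virtio transport.
 */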
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vhost_vq_get_backend(vq))
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

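/* Called for VHOST_VSOCK_SET_RUNNING with a non-zero argument: attach this
 * device as the backend of both virtqueues and flush out any packets queued
 * before the device was started.
 */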
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, NULL);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

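/* Release path: unpublish the CID, reset sockets orphaned by this device,
 * stop the virtqueues and free every packet still queued for the guest.
 */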
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

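/* VHOST_VSOCK_SET_GUEST_CID: validate the requested CID and publish this
 * instance in vhost_vsock_hash under it.
 */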
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
	 * VM), to make the loopback work.
	 */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if CID is already in use */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

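/* Userspace (typically a VMM) drives this device roughly as follows; a
 * sketch of the common sequence, not an exhaustive contract:
 *
 *	fd = open("/dev/vhost-vsock", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_GET_FEATURES, &features);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	... VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL for each virtqueue ...
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);	// u64
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);		// int, 1 = run
 */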
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_register(&vhost_transport.transport,
				  VSOCK_TRANSPORT_F_H2G);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");