// SPDX-License-Identifier: GPL-2.0-only
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small packets.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES |
                               (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
                               (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};

enum {
        VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

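/* VIRTIO_F_ACCESS_PLATFORM selects the device IOTLB path (see the
 * vhost_init_device_iotlb() call in vhost_vsock_set_features()), while
 * VIRTIO_VSOCK_F_SEQPACKET advertises SOCK_SEQPACKET support to the guest.
 */
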
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_MUTEX(vhost_vsock_mutex);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

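/* Per-device state. send_pkt_queue holds host->guest packets and is
 * protected by its own sk_buff_head lock; queued_replies counts replies
 * still queued so that the TX virtqueue can be throttled, see
 * vhost_vsock_more_replies().
 */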
struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];

        /* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
        struct hlist_node hash;

        struct vhost_work send_pkt_work;
        struct sk_buff_head send_pkt_queue; /* host->guest pending packets */

        atomic_t queued_replies;

        u32 guest_cid;
        bool seqpacket_allow;
};

static u32 vhost_transport_get_local_cid(void)
{
        return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_mutex or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID yet */
                if (other_cid == 0)
                        continue;

                if (other_cid == guest_cid)
                        return vsock;
        }

        return NULL;
}

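/* Host->guest data path: pop skbs queued on send_pkt_queue and copy them into
 * the guest's RX virtqueue buffers. A packet larger than the available buffer
 * is split across several buffers; the SEQPACKET EOM/EOR flags are cleared on
 * intermediate fragments and restored on the final one.
 */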
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
{
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
        int pkts = 0, total_len = 0;
        bool added = false;
        bool restart_tx = false;

        mutex_lock(&vq->mutex);

        if (!vhost_vq_get_backend(vq))
                goto out;

        if (!vq_meta_prefetch(vq))
                goto out;

        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);

        do {
                struct virtio_vsock_hdr *hdr;
                size_t iov_len, payload_len;
                struct iov_iter iov_iter;
                u32 flags_to_restore = 0;
                struct sk_buff *skb;
                unsigned out, in;
                size_t nbytes;
                u32 offset;
                int head;

                skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
                if (!skb) {
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0) {
                        virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                        break;
                }

                if (head == vq->num) {
                        virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                        /* We cannot finish yet if more buffers snuck in while
                         * re-enabling notify.
                         */
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                if (out) {
                        kfree_skb(skb);
                        vq_err(vq, "Expected 0 output buffers, got %u\n", out);
                        break;
                }

                iov_len = iov_length(&vq->iov[out], in);
                if (iov_len < sizeof(*hdr)) {
                        kfree_skb(skb);
                        vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
                        break;
                }

                iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
                offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
                payload_len = skb->len - offset;
                hdr = virtio_vsock_hdr(skb);

                /* If the packet is greater than the space available in the
                 * buffer, we split it using multiple buffers.
                 */
                if (payload_len > iov_len - sizeof(*hdr)) {
                        payload_len = iov_len - sizeof(*hdr);

                        /* As we copy pieces of a large packet's buffer into
                         * small rx buffers, the headers of the packets in the
                         * rx queue are created dynamically and initialized
                         * from the header of the current packet (except the
                         * length). In the case of SOCK_SEQPACKET we must also
                         * clear the message delimiter bit (VIRTIO_VSOCK_SEQ_EOM)
                         * and the MSG_EOR bit (VIRTIO_VSOCK_SEQ_EOR) if set,
                         * otherwise there would be a sequence of packets with
                         * these bits set. After the initialized header has been
                         * copied to the rx buffer, the required bits are
                         * restored.
                         */
                        if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
                                hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
                                flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

                                if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
                                        hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
                                        flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
                                }
                        }
                }

                /* Set the correct length in the header */
                hdr->len = cpu_to_le32(payload_len);

                nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
                if (nbytes != sizeof(*hdr)) {
                        kfree_skb(skb);
                        vq_err(vq, "Faulted on copying pkt hdr\n");
                        break;
                }

                if (skb_copy_datagram_iter(skb,
                                           offset,
                                           &iov_iter,
                                           payload_len)) {
                        kfree_skb(skb);
                        vq_err(vq, "Faulted on copying pkt buf\n");
                        break;
                }

                /* Deliver to monitoring devices all packets that we
                 * will transmit.
                 */
                virtio_transport_deliver_tap_pkt(skb);

                vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
                added = true;

                VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
                total_len += payload_len;

                /* If we didn't send all the payload we can requeue the packet
                 * to send it with the next available buffer.
                 */
                if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
                        hdr->flags |= cpu_to_le32(flags_to_restore);

                        /* We are queueing the same skb to handle
                         * the remaining bytes, and we want to deliver it
                         * to monitoring devices in the next iteration.
                         */
                        virtio_vsock_skb_clear_tap_delivered(skb);
                        virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
                } else {
                        if (virtio_vsock_skb_reply(skb)) {
                                int val;

                                val = atomic_dec_return(&vsock->queued_replies);

                                /* Do we have resources to resume tx
                                 * processing?
                                 */
                                if (val + 1 == tx_vq->num)
                                        restart_tx = true;
                        }

                        virtio_transport_consume_skb_sent(skb, true);
                }
        } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);

        if (restart_tx)
                vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_vsock *vsock;

        vsock = container_of(work, struct vhost_vsock, send_pkt_work);
        vq = &vsock->vqs[VSOCK_VQ_RX];

        vhost_transport_do_send_pkt(vsock, vq);
}

static int
vhost_transport_send_pkt(struct sk_buff *skb)
{
        struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
        struct vhost_vsock *vsock;
        int len = skb->len;

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
        if (!vsock) {
                rcu_read_unlock();
                kfree_skb(skb);
                return -ENODEV;
        }

        if (virtio_vsock_skb_reply(skb))
                atomic_inc(&vsock->queued_replies);

        virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
        vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

        rcu_read_unlock();
        return len;
}

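/* Drop any packets still queued for @vsk and, if that released enough reply
 * slots, re-kick the TX virtqueue that may have been throttled.
 */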
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct vhost_vsock *vsock;
        int cnt = 0;
        int ret = -ENODEV;

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
                goto out;

        cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

        if (cnt) {
                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                        vhost_poll_queue(&tx_vq->poll);
        }

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}

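/* Build an skb from a guest TX descriptor chain: copy the header, validate
 * hdr->len against both the descriptor length and
 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, then copy the payload.
 */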
static struct sk_buff *
vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
{
        struct virtio_vsock_hdr *hdr;
        struct iov_iter iov_iter;
        struct sk_buff *skb;
        size_t payload_len;
        size_t nbytes;
        size_t len;

        if (in != 0) {
                vq_err(vq, "Expected 0 input buffers, got %u\n", in);
                return NULL;
        }

        len = iov_length(vq->iov, out);

        /* len contains both payload and hdr */
        skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return NULL;

        iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);

        hdr = virtio_vsock_hdr(skb);
        nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
        if (nbytes != sizeof(*hdr)) {
                vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
                       sizeof(*hdr), nbytes);
                kfree_skb(skb);
                return NULL;
        }

        payload_len = le32_to_cpu(hdr->len);

        /* No payload */
        if (!payload_len)
                return skb;

        /* The pkt is too big or the length in the header is invalid */
        if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
            payload_len + sizeof(*hdr) > len) {
                kfree_skb(skb);
                return NULL;
        }

        virtio_vsock_skb_rx_put(skb);

        nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
        if (nbytes != payload_len) {
                vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
                       payload_len, nbytes);
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}

static bool vhost_transport_msgzerocopy_allow(void)
{
        return true;
}

static bool vhost_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport vhost_transport = {
        .transport = {
                .module                   = THIS_MODULE,

                .get_local_cid            = vhost_transport_get_local_cid,

                .init                     = virtio_transport_do_socket_init,
                .destruct                 = virtio_transport_destruct,
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
                .cancel_pkt               = vhost_transport_cancel_pkt,

                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_allow              = virtio_transport_dgram_allow,

                .stream_enqueue           = virtio_transport_stream_enqueue,
                .stream_dequeue           = virtio_transport_stream_dequeue,
                .stream_has_data          = virtio_transport_stream_has_data,
                .stream_has_space         = virtio_transport_stream_has_space,
                .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
                .stream_is_active         = virtio_transport_stream_is_active,
                .stream_allow             = virtio_transport_stream_allow,

                .seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
                .seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
                .seqpacket_allow          = vhost_transport_seqpacket_allow,
                .seqpacket_has_data       = virtio_transport_seqpacket_has_data,

                .msgzerocopy_allow        = vhost_transport_msgzerocopy_allow,

                .notify_poll_in           = virtio_transport_notify_poll_in,
                .notify_poll_out          = virtio_transport_notify_poll_out,
                .notify_recv_init         = virtio_transport_notify_recv_init,
                .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init         = virtio_transport_notify_send_init,
                .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
                .notify_buffer_size       = virtio_transport_notify_buffer_size,
                .notify_set_rcvlowat      = virtio_transport_notify_set_rcvlowat,

                .unsent_bytes             = virtio_transport_unsent_bytes,

                .read_skb                 = virtio_transport_read_skb,
        },

        .send_pkt = vhost_transport_send_pkt,
};

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
        struct vhost_vsock *vsock;
        bool seqpacket_allow = false;

        rcu_read_lock();
        vsock = vhost_vsock_get(remote_cid);

        if (vsock)
                seqpacket_allow = vsock->seqpacket_allow;

        rcu_read_unlock();

        return seqpacket_allow;
}

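/* Guest->host data path: pop TX descriptors, turn each one into an skb,
 * deliver it to monitoring devices and hand it to the core virtio transport,
 * but only if it is addressed from the guest's CID to the host CID.
 */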
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        int head, pkts = 0, total_len = 0;
        unsigned int out, in;
        struct sk_buff *skb;
        bool added = false;

        mutex_lock(&vq->mutex);

        if (!vhost_vq_get_backend(vq))
                goto out;

        if (!vq_meta_prefetch(vq))
                goto out;

        vhost_disable_notify(&vsock->dev, vq);
        do {
                struct virtio_vsock_hdr *hdr;

                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies. Leave tx virtqueue
                         * callbacks disabled.
                         */
                        goto no_more_replies;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;

                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                skb = vhost_vsock_alloc_skb(vq, out, in);
                if (!skb) {
                        vq_err(vq, "Faulted on pkt\n");
                        continue;
                }

                total_len += sizeof(*hdr) + skb->len;

                /* Deliver to monitoring devices all received packets */
                virtio_transport_deliver_tap_pkt(skb);

                hdr = virtio_vsock_hdr(skb);

                /* Only accept correctly addressed packets */
                if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
                    le64_to_cpu(hdr->dst_cid) ==
                    vhost_transport_get_local_cid())
                        virtio_transport_recv_pkt(&vhost_transport, skb);
                else
                        kfree_skb(skb);

                vhost_add_used(vq, head, 0);
                added = true;
        } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);

        vhost_transport_do_send_pkt(vsock, vq);
}

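/* Attach this vhost_vsock as the backend of each virtqueue (under the vq
 * mutex) and kick the send worker, since packets may already have been
 * queued before the device was started.
 */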
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
                        goto err_vq;
                }

                if (!vhost_vq_get_backend(vq)) {
                        vhost_vq_set_backend(vq, vsock);
                        ret = vhost_vq_init_access(vq);
                        if (ret)
                                goto err_vq;
                }

                mutex_unlock(&vq->mutex);
        }

        /* Some packets may have been queued before the device was started,
         * let's kick the send worker to send them.
         */
        vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

        mutex_unlock(&vsock->dev.mutex);
        return 0;

err_vq:
        vhost_vq_set_backend(vq, NULL);
        mutex_unlock(&vq->mutex);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vhost_vq_set_backend(vq, NULL);
                mutex_unlock(&vq->mutex);
        }
err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
        size_t i;
        int ret = 0;

        mutex_lock(&vsock->dev.mutex);

        if (check_owner) {
                ret = vhost_dev_check_owner(&vsock->dev);
                if (ret)
                        goto err;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vhost_vq_set_backend(vq, NULL);
                mutex_unlock(&vq->mutex);
        }

err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
        kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;
        int ret;

        /* This struct is large and allocation could fail, fall back to vmalloc
         * if there is no other way.
         */
        vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!vsock)
                return -ENOMEM;

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                ret = -ENOMEM;
                goto out;
        }

        vsock->guest_cid = 0; /* no CID assigned yet */
        vsock->seqpacket_allow = false;

        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
                       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
                       VHOST_VSOCK_WEIGHT, true, NULL);

        file->private_data = vsock;
        skb_queue_head_init(&vsock->send_pkt_queue);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
        return 0;

out:
        vhost_vsock_free(vsock);
        return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
        vhost_dev_flush(&vsock->dev);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* vmci_transport.c doesn't take sk_lock here either. At least we're
         * under vsock_table_lock so the sock cannot disappear while we're
         * executing.
         */

        /* If the peer is still valid, no need to reset connection */
        if (vhost_vsock_get(vsk->remote_addr.svm_cid))
                return;

        /* If the close timeout is pending, let it expire. This avoids races
         * with the timeout callback.
         */
        if (vsk->close_work_scheduled)
                return;

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        sk->sk_state = SS_UNCONNECTED;
        sk->sk_err = ECONNRESET;
        sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
        struct vhost_vsock *vsock = file->private_data;

        mutex_lock(&vhost_vsock_mutex);
        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);
        mutex_unlock(&vhost_vsock_mutex);

        /* Wait for other CPUs to finish using vsock */
        synchronize_rcu();

        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient. Room for improvement here. */
        vsock_for_each_connected_socket(&vhost_transport.transport,
                                        vhost_vsock_reset_orphans);

        /* Don't check the owner, because we are in the release path, so we
         * need to stop the vsock device in any case.
         * vhost_vsock_stop() can not fail in this case, so we don't need to
         * check the return code.
         */
        vhost_vsock_stop(vsock, false);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

        vhost_dev_cleanup(&vsock->dev);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
}

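/* Assign the guest CID. Reserved CIDs and CIDs already claimed by another
 * vhost_vsock instance or by the guest->host transport (nested VMs) are
 * refused; a successful change re-hashes the instance under vhost_vsock_mutex.
 */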
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs */
        if (guest_cid <= VMADDR_CID_HOST ||
            guest_cid == U32_MAX)
                return -EINVAL;

        /* 64-bit CIDs are not yet supported */
        if (guest_cid > U32_MAX)
                return -EINVAL;

        /* Refuse if CID is assigned to the guest->host transport (i.e. nested
         * VM), to make the loopback work.
         */
        if (vsock_find_cid(guest_cid))
                return -EADDRINUSE;

        /* Refuse if CID is already in use */
        mutex_lock(&vhost_vsock_mutex);
        other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                mutex_unlock(&vhost_vsock_mutex);
                return -EADDRINUSE;
        }

        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);

        vsock->guest_cid = guest_cid;
        hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
        mutex_unlock(&vhost_vsock_mutex);

        return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
                goto err;
        }

        if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
                if (vhost_init_device_iotlb(&vsock->dev))
                        goto err;
        }

        vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;

err:
        mutex_unlock(&vsock->dev.mutex);
        return -EFAULT;
}

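/* Device ioctl entry point. Roughly, userspace (e.g. a VMM) configures the
 * device along these lines; this is an illustrative sketch only, "fd" and the
 * error handling are hypothetical and the vring setup via the generic
 * VHOST_SET_VRING_* ioctls is omitted:
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;
 *	int start = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	... set up vrings with VHOST_SET_VRING_NUM/ADDR/KICK/CALL ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);
 *
 * VHOST_VSOCK_SET_GUEST_CID takes a u64 and VHOST_VSOCK_SET_RUNNING an int;
 * anything not handled here falls through to the generic vhost ioctls.
 */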
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid;
        u64 features;
        int start;
        int r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                else
                        return vhost_vsock_stop(vsock, true);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        case VHOST_GET_BACKEND_FEATURES:
                features = VHOST_VSOCK_BACKEND_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_BACKEND_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
                        return -EOPNOTSUPP;
                vhost_set_backend_features(&vsock->dev, features);
                return 0;
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct vhost_vsock *vsock = file->private_data;
        struct vhost_dev *dev = &vsock->dev;
        int noblock = file->f_flags & O_NONBLOCK;

        return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
                                          struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct vhost_vsock *vsock = file->private_data;
        struct vhost_dev *dev = &vsock->dev;

        return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
        struct vhost_vsock *vsock = file->private_data;
        struct vhost_dev *dev = &vsock->dev;

        return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
        .release        = vhost_vsock_dev_release,
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .read_iter      = vhost_vsock_chr_read_iter,
        .write_iter     = vhost_vsock_chr_write_iter,
        .poll           = vhost_vsock_chr_poll,
};

static struct miscdevice vhost_vsock_misc = {
        .minor = VHOST_VSOCK_MINOR,
        .name = "vhost-vsock",
        .fops = &vhost_vsock_fops,
};

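/* Register with the vsock core as the host-to-guest (H2G) transport and
 * expose the /dev/vhost-vsock misc device.
 */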
static int __init vhost_vsock_init(void)
{
        int ret;

        ret = vsock_core_register(&vhost_transport.transport,
                                  VSOCK_TRANSPORT_F_H2G);
        if (ret < 0)
                return ret;

        ret = misc_register(&vhost_vsock_misc);
        if (ret) {
                vsock_core_unregister(&vhost_transport.transport);
                return ret;
        }

        return 0;
}

static void __exit vhost_vsock_exit(void)
{
        misc_deregister(&vhost_vsock_misc);
        vsock_core_unregister(&vhost_transport.transport);
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");