Linux 4.19.133
drivers/vhost/vsock.c

/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

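/* Per-instance state.  The virtqueue indices follow the guest's view of the
 * device (VSOCK_VQ_RX/VSOCK_VQ_TX): the host fills vqs[VSOCK_VQ_RX] with
 * guest-bound packets and drains guest-sent packets from vqs[VSOCK_VQ_TX].
 */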
struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];

        /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
        struct hlist_node hash;

        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
        struct list_head send_pkt_list;	/* host->guest pending packets */

        atomic_t queued_replies;

        u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
        return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_lock or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID yet */
                if (other_cid == 0)
                        continue;

                if (other_cid == guest_cid)
                        return vsock;
        }

        return NULL;
}

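/* Drain vsock->send_pkt_list into the guest's RX virtqueue.  Each packet
 * header is rewritten with the number of payload bytes that fit into the
 * current buffer; any remainder stays on the list (pkt->off tracks progress)
 * so large packets are split across multiple guest buffers.  The loop is
 * bounded by vhost_exceeds_weight() so one virtqueue cannot starve others.
 */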
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
{
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
        int pkts = 0, total_len = 0;
        bool added = false;
        bool restart_tx = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);

        do {
                struct virtio_vsock_pkt *pkt;
                struct iov_iter iov_iter;
                unsigned out, in;
                size_t nbytes;
                size_t iov_len, payload_len;
                int head;

                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                spin_unlock_bh(&vsock->send_pkt_list_lock);

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                if (head == vq->num) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);

                        /* We cannot finish yet if more buffers snuck in while
                         * re-enabling notify.
                         */
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                if (out) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Expected 0 output buffers, got %u\n", out);
                        break;
                }

                iov_len = iov_length(&vq->iov[out], in);
                if (iov_len < sizeof(pkt->hdr)) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
                        break;
                }

                iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
                payload_len = pkt->len - pkt->off;

                /* If the packet is greater than the space available in the
                 * buffer, we split it using multiple buffers.
                 */
                if (payload_len > iov_len - sizeof(pkt->hdr))
                        payload_len = iov_len - sizeof(pkt->hdr);

                /* Set the correct length in the header */
                pkt->hdr.len = cpu_to_le32(payload_len);

                nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
                if (nbytes != sizeof(pkt->hdr)) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt hdr\n");
                        break;
                }

                nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
                                      &iov_iter);
                if (nbytes != payload_len) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt buf\n");
                        break;
                }

                /* Deliver to monitoring devices all packets that we
                 * will transmit.
                 */
                virtio_transport_deliver_tap_pkt(pkt);

                vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
                added = true;

                pkt->off += payload_len;
                total_len += payload_len;

                /* If we didn't send all the payload we can requeue the packet
                 * to send it with the next available buffer.
                 */
                if (pkt->off < pkt->len) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                } else {
                        if (pkt->reply) {
                                int val;

                                val = atomic_dec_return(&vsock->queued_replies);

                                /* Do we have resources to resume tx
                                 * processing?
                                 */
                                if (val + 1 == tx_vq->num)
                                        restart_tx = true;
                        }

                        virtio_transport_free_pkt(pkt);
                }
        } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);

        if (restart_tx)
                vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_vsock *vsock;

        vsock = container_of(work, struct vhost_vsock, send_pkt_work);
        vq = &vsock->vqs[VSOCK_VQ_RX];

        vhost_transport_do_send_pkt(vsock, vq);
}

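/* .send_pkt callback of the vhost transport: look up the destination
 * instance by guest CID under RCU, append the packet to send_pkt_list and
 * kick the send worker.  Reply packets are counted in queued_replies so the
 * TX handler can throttle the guest while too many replies are pending.
 */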
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
        struct vhost_vsock *vsock;
        int len = pkt->len;

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
                rcu_read_unlock();
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }

        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

        rcu_read_unlock();
        return len;
}

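/* Remove all pending packets that belong to @vsk.  If any of them were
 * replies, queued_replies is reduced accordingly and the TX virtqueue is
 * re-polled in case it had been throttled on the reply budget.
 */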
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
        int ret = -ENODEV;
        LIST_HEAD(freeme);

        rcu_read_lock();

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
                goto out;

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
                if (pkt->vsk != vsk)
                        continue;
                list_move(&pkt->list, &freeme);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        list_for_each_entry_safe(pkt, n, &freeme, list) {
                if (pkt->reply)
                        cnt++;
                list_del(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }

        if (cnt) {
                struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
                int new_cnt;

                new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
                if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
                        vhost_poll_queue(&tx_vq->poll);
        }

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}

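/* Build a virtio_vsock_pkt from the descriptor chain the guest placed on its
 * TX virtqueue: read the header first, then the payload, which is bounded by
 * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE.  Returns NULL on malformed or oversized
 * input.
 */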
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
{
        struct virtio_vsock_pkt *pkt;
        struct iov_iter iov_iter;
        size_t nbytes;
        size_t len;

        if (in != 0) {
                vq_err(vq, "Expected 0 input buffers, got %u\n", in);
                return NULL;
        }

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return NULL;

        len = iov_length(vq->iov, out);
        iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

        nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
        if (nbytes != sizeof(pkt->hdr)) {
                vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
                       sizeof(pkt->hdr), nbytes);
                kfree(pkt);
                return NULL;
        }

        if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
                pkt->len = le32_to_cpu(pkt->hdr.len);

        /* No payload */
        if (!pkt->len)
                return pkt;

        /* The pkt is too big */
        if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
                kfree(pkt);
                return NULL;
        }

        pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
        if (!pkt->buf) {
                kfree(pkt);
                return NULL;
        }

        nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
        if (nbytes != pkt->len) {
                vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
                       pkt->len, nbytes);
                virtio_transport_free_pkt(pkt);
                return NULL;
        }

        return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}

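/* Guest->host path: pop descriptors from the TX virtqueue, turn them into
 * packets and hand correctly addressed ones to the core via
 * virtio_transport_recv_pkt(); everything else is dropped.  Processing stops
 * while vhost_vsock_more_replies() reports no room for further replies and
 * resumes via restart_tx in vhost_transport_do_send_pkt().
 */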
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        struct virtio_vsock_pkt *pkt;
        int head, pkts = 0, total_len = 0;
        unsigned int out, in;
        bool added = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        vhost_disable_notify(&vsock->dev, vq);
        do {
                u32 len;

                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
                         * callbacks disabled.
                         */
                        goto no_more_replies;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;

                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                pkt = vhost_vsock_alloc_pkt(vq, out, in);
                if (!pkt) {
                        vq_err(vq, "Faulted on pkt\n");
                        continue;
                }

                len = pkt->len;

                /* Deliver to monitoring devices all received packets */
                virtio_transport_deliver_tap_pkt(pkt);

                /* Only accept correctly addressed packets */
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
                    le64_to_cpu(pkt->hdr.dst_cid) ==
                    vhost_transport_get_local_cid())
                        virtio_transport_recv_pkt(pkt);
                else
                        virtio_transport_free_pkt(pkt);

                len += sizeof(pkt->hdr);
                vhost_add_used(vq, head, len);
                total_len += len;
                added = true;
        } while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);

        vhost_transport_do_send_pkt(vsock, vq);
}

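/* VHOST_VSOCK_SET_RUNNING(1): validate virtqueue access and publish the
 * device by setting vq->private_data, which is what the kick handlers test
 * before doing any work.  Packets queued before the device was started are
 * flushed by kicking the send worker.
 */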
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
                        goto err_vq;
                }

                if (!vq->private_data) {
                        vq->private_data = vsock;
                        ret = vhost_vq_init_access(vq);
                        if (ret)
                                goto err_vq;
                }

                mutex_unlock(&vq->mutex);
        }

        /* Some packets may have been queued before the device was started,
         * let's kick the send worker to send them.
         */
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

        mutex_unlock(&vsock->dev.mutex);
        return 0;

err_vq:
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }
err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }

err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
        kvfree(vsock);
}

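/* Allocate and initialize an instance when userspace opens /dev/vhost-vsock.
 * No guest CID is assigned yet, so the instance is not added to
 * vhost_vsock_hash until VHOST_VSOCK_SET_GUEST_CID.
 */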
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;
        int ret;

        /* This struct is large and allocation could fail, fall back to vmalloc
         * if there is no other way.
         */
        vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!vsock)
                return -ENOMEM;

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                ret = -ENOMEM;
                goto out;
        }

        vsock->guest_cid = 0; /* no CID assigned yet */

        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
                       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
                       VHOST_VSOCK_WEIGHT);

        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
        return 0;

out:
        vhost_vsock_free(vsock);
        return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
                if (vsock->vqs[i].handle_kick)
                        vhost_poll_flush(&vsock->vqs[i].poll);
        vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* vmci_transport.c doesn't take sk_lock here either.  At least we're
         * under vsock_table_lock so the sock cannot disappear while we're
         * executing.
         */

        /* If the peer is still valid, no need to reset connection */
        if (vhost_vsock_get(vsk->remote_addr.svm_cid))
                return;

        /* If the close timeout is pending, let it expire.  This avoids races
         * with the timeout callback.
         */
        if (vsk->close_work_scheduled)
                return;

        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        sk->sk_state = SS_UNCONNECTED;
        sk->sk_err = ECONNRESET;
        sk->sk_error_report(sk);
}

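/* Teardown on the final close of the device fd: unhash the instance and wait
 * an RCU grace period so lookups can no longer find it, reset sockets still
 * pointing at this CID, then stop the virtqueues, flush outstanding work and
 * free whatever is left on send_pkt_list.
 */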
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
        struct vhost_vsock *vsock = file->private_data;

        spin_lock_bh(&vhost_vsock_lock);
        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);
        spin_unlock_bh(&vhost_vsock_lock);

        /* Wait for other CPUs to finish using vsock */
        synchronize_rcu();

        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

        vhost_vsock_stop(vsock);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        while (!list_empty(&vsock->send_pkt_list)) {
                struct virtio_vsock_pkt *pkt;

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_dev_cleanup(&vsock->dev);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs */
        if (guest_cid <= VMADDR_CID_HOST ||
            guest_cid == U32_MAX)
                return -EINVAL;

        /* 64-bit CIDs are not yet supported */
        if (guest_cid > U32_MAX)
                return -EINVAL;

        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
        other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }

        if (vsock->guest_cid)
                hash_del_rcu(&vsock->hash);

        vsock->guest_cid = guest_cid;
        hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);

        return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
                mutex_unlock(&vsock->dev.mutex);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;
}

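/* Device ioctl interface.  Only the vsock-specific requests are handled
 * here; everything else (owner, memory table, vring setup) falls through to
 * the generic vhost ioctls.  A rough, illustrative sketch of how a VMM might
 * drive the device (error handling omitted, not a complete bring-up):
 *
 *      int fd = open("/dev/vhost-vsock", O_RDWR);
 *      ioctl(fd, VHOST_SET_OWNER, NULL);
 *      __u64 cid = 3;                          // first non-reserved CID
 *      ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *      // ... VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup ...
 *      int start = 1;
 *      ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);
 */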
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid;
        u64 features;
        int start;
        int r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                else
                        return vhost_vsock_stop(vsock);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}

#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
                                         unsigned long arg)
{
        return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
        .release        = vhost_vsock_dev_release,
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
        .minor = VHOST_VSOCK_MINOR,
        .name = "vhost-vsock",
        .fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
        .transport = {
                .get_local_cid            = vhost_transport_get_local_cid,

                .init                     = virtio_transport_do_socket_init,
                .destruct                 = virtio_transport_destruct,
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,
                .cancel_pkt               = vhost_transport_cancel_pkt,

                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_allow              = virtio_transport_dgram_allow,

                .stream_enqueue           = virtio_transport_stream_enqueue,
                .stream_dequeue           = virtio_transport_stream_dequeue,
                .stream_has_data          = virtio_transport_stream_has_data,
                .stream_has_space         = virtio_transport_stream_has_space,
                .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
                .stream_is_active         = virtio_transport_stream_is_active,
                .stream_allow             = virtio_transport_stream_allow,

                .notify_poll_in           = virtio_transport_notify_poll_in,
                .notify_poll_out          = virtio_transport_notify_poll_out,
                .notify_recv_init         = virtio_transport_notify_recv_init,
                .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init         = virtio_transport_notify_send_init,
                .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

                .set_buffer_size          = virtio_transport_set_buffer_size,
                .set_min_buffer_size      = virtio_transport_set_min_buffer_size,
                .set_max_buffer_size      = virtio_transport_set_max_buffer_size,
                .get_buffer_size          = virtio_transport_get_buffer_size,
                .get_min_buffer_size      = virtio_transport_get_min_buffer_size,
                .get_max_buffer_size      = virtio_transport_get_max_buffer_size,
        },

        .send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
        int ret;

        ret = vsock_core_init(&vhost_transport.transport);
        if (ret < 0)
                return ret;
        return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
        misc_deregister(&vhost_vsock_misc);
        vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock ");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");