Linux 4.9.89 (linux/fpc-iii.git)
drivers/vhost/vsock.c
blob 0ec970ca64ce14022e5755cee7b4216b2238e929
/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid) {
			spin_unlock_bh(&vhost_vsock_lock);
			return vsock;
		}
	}
	spin_unlock_bh(&vhost_vsock_lock);

	return NULL;
}

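/* Deliver packets queued on send_pkt_list (host->guest) into buffers the
 * guest has posted on its RX virtqueue, signalling the guest once at least
 * one packet has been added.
 */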
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

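/* Queue a packet for the guest identified by hdr.dst_cid and schedule the
 * send worker to push it onto that guest's RX virtqueue.
 */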
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	struct vhost_virtqueue *vq;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	vq = &vsock->vqs[VSOCK_VQ_RX];

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

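/* Drop any packets queued for @vsk that have not yet been handed to the
 * guest, and restart TX processing if enough reply slots were released.
 */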
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}

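/* Build a virtio_vsock_pkt from the descriptor chain the guest placed on
 * the TX virtqueue; returns NULL on malformed or oversized packets.
 */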
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

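/* Handle guest->host traffic: runs when the guest kicks the TX virtqueue,
 * turning each descriptor chain into a packet for the core vsock code.
 */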
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

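/* The guest has made new RX buffers available; resume draining the
 * host->guest send_pkt_list into them.
 */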
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

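/* VHOST_VSOCK_SET_RUNNING(1): check that userspace set the virtqueues up
 * correctly, then mark them live by installing vq->private_data.
 */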
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

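/* Opening /dev/vhost-vsock allocates the device state, initialises the two
 * virtqueues and adds the instance to the global vhost_vsock_list.
 */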
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vmalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

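/* Closing the /dev/vhost-vsock fd tears the device down: any socket still
 * connected to this CID is reset, queued packets are freed and the vhost
 * device is cleaned up.
 */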
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

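/* VHOST_VSOCK_SET_GUEST_CID: assign the guest a context ID after rejecting
 * reserved, 64-bit and already-used values.
 */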
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock)
		return -EADDRINUSE;

	spin_lock_bh(&vhost_vsock_lock);
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock ");