/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}
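
/* Look up an instance by guest CID.  Assumes vhost_vsock_lock is held so
 * the list cannot change underneath us.
 */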
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid) {
			return vsock;
		}
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}
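
/* Drain send_pkt_list into buffers the guest has made available on its
 * RX virtqueue, signalling the guest once at the end if anything was
 * delivered.
 */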
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		/* Deliver to monitoring devices all correctly transmitted
		 * packets.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}
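
/* Transport .send_pkt callback: queue a host->guest packet and defer the
 * actual virtqueue work to send_pkt_work.
 */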
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}
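
/* Drop any packets queued for @vsk that have not yet reached the guest,
 * and release their share of the queued_replies budget, waking the tx
 * handler if it was throttled on that budget.
 */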
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}
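
/* Build a virtio_vsock_pkt from the guest-supplied descriptor chain.
 * The guest writes, so only output (host-readable) descriptors are
 * valid here; returns NULL on malformed input or allocation failure.
 */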
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}
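
/* Packets received here may generate replies (for example a RESET in
 * response to a connection request nobody listens for).  Replies wait on
 * send_pkt_list until the guest posts RX buffers, so a guest that never
 * services its RX queue could otherwise pin unbounded reply memory; the
 * queued_replies budget above caps this at the TX virtqueue size.
 */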
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}
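
/* VHOST_VSOCK_SET_RUNNING(1): publish the instance in each virtqueue's
 * private_data so the kick handlers start processing.
 */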
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}
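
/* VHOST_VSOCK_SET_RUNNING(0): clear private_data so the kick handlers
 * bail out under vq->mutex on their next run.
 */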
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}
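
/* Wait for all queued vhost work (kick handlers and send_pkt_work) to
 * finish executing.
 */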
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
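
/*
 * Illustrative userspace setup sequence (a sketch, not part of this
 * driver; error handling and the memory/vring setup that a real VMM
 * such as QEMU performs are elided):
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *
 *	uint64_t cid = 3;	// any CID above VMADDR_CID_HOST
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *
 *	// ...VHOST_SET_MEM_TABLE, VHOST_SET_VRING_* for both vqs...
 *
 *	int running = 1;
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 */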
static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};
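
/* Everything except CID lookup, packet cancellation, and actual packet
 * transmission is shared with the virtio transport common code.
 */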
static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");