/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_lock or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (!other_cid)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

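/* Fill the guest's RX virtqueue with queued host->guest packets.  Runs in
 * vhost worker context and takes vq->mutex itself.
 */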
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr))
			payload_len = iov_len - sizeof(pkt->hdr);

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		} else {
			if (pkt->reply) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);

				/* Do we have resources to resume tx
				 * processing?
				 */
				if (val + 1 == tx_vq->num)
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
		}
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

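/* vhost work function that feeds the RX virtqueue whenever new host->guest
 * packets have been queued.
 */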
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

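/* Queue a packet for delivery to the guest and kick the send worker.
 * Returns the packet length, or -ENODEV if no vhost_vsock instance owns
 * the destination CID.
 */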
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

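/* Drop all queued packets belonging to a socket being cancelled, and
 * restart tx processing if freeing queued replies made space again.
 */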
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

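/* Allocate a packet and fill it from the guest's TX descriptor chain.
 * Returns NULL on malformed input or allocation failure.
 */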
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

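/* The guest kicked the TX virtqueue: drain descriptors into packets and
 * pass them to the vsock core for delivery.
 */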
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);

	do {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		len += sizeof(pkt->hdr);
		vhost_add_used(vq, head, len);
		total_len += len;
		added = true;
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

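/* The guest kicked the RX virtqueue after refilling buffers: resume
 * sending queued host->guest packets.
 */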
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

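/* VHOST_VSOCK_SET_RUNNING(1): attach this instance to both virtqueues and
 * flush any packets queued before the device was started.
 */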
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret < 0)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* Some packets may have been queued before the device was started,
	 * let's kick the send worker to send them.
	 */
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

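/* VHOST_VSOCK_SET_RUNNING(0): detach this instance from both virtqueues so
 * the workers ignore further kicks.
 */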
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret < 0)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

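/* open() on /dev/vhost-vsock: allocate an instance with TX/RX virtqueues
 * and a send worker; no guest CID is assigned yet.
 */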
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

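/* Wait for all pending virtqueue and send work on this instance to finish. */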
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

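/* Called for each connected socket when an instance disappears: reset
 * sockets whose peer CID no longer maps to any vhost_vsock.
 */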
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

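/* release() on /dev/vhost-vsock: unpublish the CID, reset orphaned sockets,
 * stop the device and free pending packets.
 */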
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

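/* VHOST_VSOCK_SET_GUEST_CID: validate the CID and publish this instance in
 * the global hash under vhost_vsock_lock.
 */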
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

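/* VHOST_SET_FEATURES: validate and apply the negotiated feature bits to
 * all virtqueues.
 */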
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

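/* Illustrative only (not part of this file): a userspace VMM would
 * typically drive this device roughly as follows, assuming the usual vhost
 * setup ioctls (VHOST_SET_OWNER, memory table, vring setup) have also been
 * issued:
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// guest CID, must be > VMADDR_CID_HOST
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	int start = 1;
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &start);
 */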
#ifdef CONFIG_COMPAT
static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
					 unsigned long arg)
{
	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_vsock_dev_compat_ioctl,
#endif
};

static struct miscdevice vhost_vsock_misc = {
	.minor = VHOST_VSOCK_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

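/* Module init: register the transport with the vsock core before exposing
 * the misc device.
 */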
static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");
MODULE_ALIAS_MISCDEV(VHOST_VSOCK_MINOR);
MODULE_ALIAS("devname:vhost-vsock");