/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/interval_tree_generic.h>

#include "vhost.h"
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, static inline, vhost_umem_interval_tree);
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
		vq->is_le = true;
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
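
/*
 * Illustrative sketch (not part of this driver): with
 * CONFIG_VHOST_CROSS_ENDIAN_LEGACY, a userspace backend driving a legacy
 * (pre virtio 1.0) big-endian guest from a little-endian host is expected
 * to declare the ring endianness before the backend is started, roughly
 * like this. The vhost_fd variable and the error handling are assumptions
 * made for illustration only:
 *
 *	struct vhost_vring_state s = {
 *		.index = 0,
 *		.num = VHOST_VRING_BIG_ENDIAN,
 *	};
 *	if (ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s))
 *		perror("VHOST_SET_VRING_ENDIAN");
 *
 * VHOST_GET_VRING_ENDIAN reads the current setting back the same way.
 */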
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = virtio_legacy_is_little_endian();
}
struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
	init_waitqueue_head(&work->done);
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
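
/*
 * Illustrative sketch (not taken from this file): a vhost backend embeds
 * a struct vhost_work in its own state, points it at a handler, and hands
 * it to the per-device worker kthread; vhost_work_flush() then waits for
 * the handler to have run. The names my_backend and my_work_fn are made
 * up for illustration:
 *
 *	static void my_work_fn(struct vhost_work *work)
 *	{
 *		struct my_backend *b = container_of(work, struct my_backend, work);
 *		// runs in the vhost worker kthread, with dev->mm in use
 *	}
 *
 *	vhost_work_init(&b->work, my_work_fn);
 *	vhost_work_queue(&b->dev, &b->work);
 *	vhost_work_flush(&b->dev, &b->work);	// wait until my_work_fn ran
 */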
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->last_used_event = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
}
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
				       GFP_KERNEL);
		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}
void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					POLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker); /* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
static void *vhost_kvzalloc(unsigned long size)
{
	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	if (!n)
		n = vzalloc(size);
	return n;
}
struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return vhost_kvzalloc(sizeof(struct vhost_umem));
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	umem->umem_tree = RB_ROOT;
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
static void vhost_umem_free(struct vhost_umem *umem,
			    struct vhost_umem_node *node)
{
	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
	list_del(&node->link);
	kfree(node);
	umem->numem--;
}
static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
		vhost_umem_free(umem, node);

	kvfree(umem);
}
static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->error)
			fput(dev->vqs[i]->error);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		if (dev->vqs[i]->call)
			fput(dev->vqs[i]->call);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	vhost_umem_clean(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}
/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
			       int log_all)
{
	struct vhost_umem_node *node;

	if (!umem)
		return 0;

	list_for_each_entry(node, &umem->umem_list, link) {
		unsigned long a = node->userspace_addr;

		if (vhost_overflow(node->userspace_addr, node->size))
			return 0;

		if (!access_ok(VERIFY_WRITE, (void __user *)a,
			       node->size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   node->start,
						   node->size))
			return 0;
	}
	return 1;
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return 0;
	}
	return 1;
}
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory could be accessed through iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		/* TODO: more fast path */
		struct iov_iter t;
		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * memory could be accessed through iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		/* TODO: more fast path */
		struct iov_iter f;
		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}
static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
				     void __user *addr, unsigned size)
{
	int ret;

	/* This function should be called after iotlb
	 * prefetch, which means we're sure that the vq
	 * memory could be accessed through iotlb. So -EAGAIN should
	 * not happen in this case.
	 */
	/* TODO: more fast path */
	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}
#define vhost_put_user(vq, x, ptr) \
({ \
	int ret = -EFAULT; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_user(vq, x, ptr) \
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})
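
/*
 * Illustrative note (a sketch, not code from this file): the two macros
 * above choose between the direct fast path and the device-IOTLB path.
 * Reading the avail index, for example, reduces either to a plain
 * __get_user() on vq->avail->idx, or to a translate_desc() lookup
 * followed by __get_user() on the translated address:
 *
 *	__virtio16 idx;
 *	if (vhost_get_user(vq, idx, &vq->avail->idx))
 *		vq_err(vq, "Failed to access avail idx");
 *
 * Callers hold the vq mutex and, when an IOTLB is in use, are expected to
 * have gone through vq_iotlb_prefetch() first so -EAGAIN cannot occur.
 */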
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		mutex_lock(&d->vqs[i]->mutex);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}
static int vhost_new_umem_range(struct vhost_umem *umem,
				u64 start, u64 size, u64 end,
				u64 userspace_addr, int perm)
{
	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);

	if (!node)
		return -ENOMEM;

	if (umem->numem == max_iotlb_entries) {
		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
		vhost_umem_free(umem, tmp);
	}

	node->start = start;
	node->size = size;
	node->last = end;
	node->userspace_addr = userspace_addr;
	node->perm = perm;
	INIT_LIST_HEAD(&node->link);
	list_add_tail(&node->link, &umem->umem_list);
	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
	umem->numem++;

	return 0;
}
static void vhost_del_umem_range(struct vhost_umem *umem,
				 u64 start, u64 end)
{
	struct vhost_umem_node *node;

	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   start, end)))
		vhost_umem_free(umem, node);
}
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 > vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}
static int umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return -EFAULT;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok(VERIFY_READ, (void __user *)a, size))
		return -EFAULT;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
		return -EFAULT;
	return 0;
}
static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
					 msg->iova + msg->size - 1,
					 msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_del_umem_range(dev->iotlb, msg->iova,
				     msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	return ret;
}
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_msg_node node;
	unsigned size = sizeof(struct vhost_msg);
	ssize_t ret;
	int err;

	if (iov_iter_count(from) < size)
		return 0;
	ret = copy_from_iter(&node.msg, size, from);
	if (ret != size)
		goto done;

	switch (node.msg.type) {
	case VHOST_IOTLB_MSG:
		err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
		if (err)
			ret = err;
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		ret = copy_to_iter(&node->msg, size, to);

		if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}

		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;

	node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
	if (!node)
		return -ENOMEM;

	msg = &node->msg.iotlb;
	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}
static int iotlb_access_ok(struct vhost_virtqueue *vq,
			   int access, u64 addr, u64 len)
{
	const struct vhost_umem_node *node;
	struct vhost_umem *umem = vq->iotlb;
	u64 s = 0, size;

	while (len > s) {
		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr,
							   addr + len - 1);
		if (node == NULL || node->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(node->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = node->size - addr + node->start;
		s += size;
		addr += size;
	}

	return true;
}
int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
			       num * sizeof *vq->desc) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
			       sizeof *vq->avail +
			       num * sizeof *vq->avail->ring + s) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
			       sizeof *vq->used +
			       num * sizeof *vq->used->ring + s);
}
EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
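
/*
 * Illustrative sketch of how a backend is expected to use the helper
 * above (an assumption for illustration, not code taken from this file):
 * the device handler prefetches IOTLB entries for the whole ring before
 * touching it, and otherwise returns early and waits for userspace to
 * service the queued misses:
 *
 *	mutex_lock(&vq->mutex);
 *	if (!vq_iotlb_prefetch(vq)) {
 *		// misses were queued to read_list; the handler will be
 *		// re-queued by vhost_iotlb_notify_vq() once they are filled
 *		mutex_unlock(&vq->mutex);
 *		return;
 *	}
 *	// safe to call vhost_get_vq_desc() and friends from here on
 *	mutex_unlock(&vq->mutex);
 */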
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (vq->iotlb) {
		/* When device IOTLB was used, the access validation
		 * will be validated during prefetching.
		 */
		return 1;
	}
	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static struct vhost_umem *vhost_umem_alloc(void)
{
	struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));

	if (!umem)
		return NULL;

	umem->umem_tree = RB_ROOT;
	umem->numem = 0;
	INIT_LIST_HEAD(&umem->umem_list);

	return umem;
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_umem *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = vhost_umem_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_new_umem_range(newumem,
					 region->guest_phys_addr,
					 region->memory_size,
					 region->guest_phys_addr +
					 region->memory_size - 1,
					 region->userspace_addr,
					 VHOST_ACCESS_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_umem_clean(oldumem);
	return 0;

err:
	vhost_umem_clean(newumem);
	kvfree(newmem);
	return -EFAULT;
}
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = vq->last_used_event = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		 * data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}

		/* Make sure it's safe to cast pointers to vring types. */
		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
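
/*
 * Illustrative sketch of the usual per-ring setup sequence driven from
 * userspace (an assumption for illustration, not code from this file):
 *
 *	struct vhost_vring_state num  = { .index = 0, .num = 256 };
 *	struct vhost_vring_state base = { .index = 0, .num = 0 };
 *	struct vhost_vring_addr  addr = { .index = 0, ... };
 *	struct vhost_vring_file  kick = { .index = 0, .fd = kick_efd };
 *	struct vhost_vring_file  call = { .index = 0, .fd = call_efd };
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_NUM,  &num);
 *	ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base);
 *	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);
 *
 * All of these take the ring index in .index; VHOST_SET_VRING_NUM and
 * VHOST_SET_VRING_BASE are rejected with -EBUSY once the ring already
 * has an active backend.
 */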
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_umem *niotlb, *oiotlb;
	int i;

	niotlb = vhost_umem_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->iotlb = niotlb;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	vhost_umem_clean(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			d->log_file = eventfp;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
*vq
, struct vhost_log
*log
,
1647 unsigned int log_num
, u64 len
)
1651 /* Make sure data written is seen before log. */
1653 for (i
= 0; i
< log_num
; ++i
) {
1654 u64 l
= min(log
[i
].len
, len
);
1655 r
= log_write(vq
->log_base
, log
[i
].addr
, l
);
1661 eventfd_signal(vq
->log_ctx
, 1);
1665 /* Length written exceeds what we have stored. This is a bug. */
1669 EXPORT_SYMBOL_GPL(vhost_log_write
);
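
/*
 * Illustrative note (a sketch; the userspace side is an assumption, not
 * part of this file): the dirty log set up via VHOST_SET_LOG_BASE is a
 * bitmap with one bit per VHOST_PAGE_SIZE page of guest memory, so the
 * bit for a guest-physical address gpa lives at:
 *
 *	page = gpa / VHOST_PAGE_SIZE;
 *	byte = log_base[page / 8];
 *	bit  = page % 8;
 *
 * A migration loop in userspace would atomically read and clear these
 * bytes to find the pages it must re-send; that consumer lives outside
 * this file.
 */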
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;

	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			   &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
*vq
, u16 avail_event
)
1693 if (vhost_put_user(vq
, cpu_to_vhost16(vq
, vq
->avail_idx
),
1694 vhost_avail_event(vq
)))
1696 if (unlikely(vq
->log_used
)) {
1698 /* Make sure the event is seen before log. */
1700 /* Log avail event write */
1701 used
= vhost_avail_event(vq
);
1702 log_write(vq
->log_base
, vq
->log_addr
+
1703 (used
- (void __user
*)vq
->used
),
1704 sizeof *vhost_avail_event(vq
));
1706 eventfd_signal(vq
->log_ctx
, 1);
1711 int vhost_vq_init_access(struct vhost_virtqueue
*vq
)
1713 __virtio16 last_used_idx
;
1715 bool is_le
= vq
->is_le
;
1717 if (!vq
->private_data
) {
1718 vhost_reset_is_le(vq
);
1722 vhost_init_is_le(vq
);
1724 r
= vhost_update_used_flags(vq
);
1727 vq
->signalled_used_valid
= false;
1729 !access_ok(VERIFY_READ
, &vq
->used
->idx
, sizeof vq
->used
->idx
)) {
1733 r
= vhost_get_user(vq
, last_used_idx
, &vq
->used
->idx
);
1735 vq_err(vq
, "Can't access used idx at %p\n",
1739 vq
->last_used_idx
= vhost16_to_cpu(vq
, last_used_idx
);
1746 EXPORT_SYMBOL_GPL(vhost_vq_init_access
);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_umem_node *node;
	struct vhost_dev *dev = vq->dev;
	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr, addr + len - 1);
		if (node == NULL || node->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(node->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = node->size - addr + node->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(node->userspace_addr + addr - node->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}
/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, desc->next);
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
				       ret, i);
			return ret;
		}

		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}
/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access. Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found. A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_user(vq, ring_head,
		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
					   sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
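
/*
 * Illustrative sketch of the caller side (an assumption for illustration,
 * not code from this file): a backend's handle_kick work typically loops
 * like this, with out/in counting output and input iovecs respectively:
 *
 *	unsigned out, in;
 *	int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				     &out, &in, NULL, NULL);
 *	if (head < 0)
 *		break;			// error already reported via vq_err
 *	if (head == vq->num)
 *		break;			// nothing available right now
 *	// ... consume vq->iov[0 .. out + in), then:
 *	vhost_add_used_and_signal(&dev, vq, head, len);
 */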
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (count == 1) {
		if (vhost_put_user(vq, heads[0].id, &used->id)) {
			vq_err(vq, "Failed to write used id");
			return -EFAULT;
		}
		if (vhost_put_user(vq, heads[0].len, &used->len)) {
			vq_err(vq, "Failed to write used len");
			return -EFAULT;
		}
	} else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			   &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		/* Flush out used index updates. This is paired
		 * with the barrier that the Guest executes when enabling
		 * interrupts. */
		smp_mb();
		if (vhost_get_user(vq, flags, &vq->avail->flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	/* We're sure if the following conditions are met, there's no
	 * need to notify guest:
	 * 1) cached used event is ahead of new
	 * 2) old to new updating does not cross cached used event. */
	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
	    !vring_need_event(vq->last_used_event, new, old))
		return false;
	else {
		/* Flush out used index updates. This is paired
		 * with the barrier that the Guest executes when enabling
		 * interrupts. */
		smp_mb();

		if (vhost_get_user(vq, event, vhost_used_event(vq))) {
			vq_err(vq, "Failed to get used event idx");
			return true;
		}
		vq->last_used_event = vhost16_to_cpu(vq, event);
	}

	return vring_need_event(vq->last_used_event, new, old);
}
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest to tell them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* return true if we're sure that the available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
	if (r)
		return false;

	return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
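
/*
 * Illustrative sketch (an assumption for illustration, not code from this
 * file): the usual race-free pattern is to disable notifications while
 * draining the ring, and only stop once vhost_enable_notify() confirms
 * nothing slipped in meanwhile. process_available_buffers() is a
 * hypothetical backend helper:
 *
 *	for (;;) {
 *		vhost_disable_notify(&dev, vq);
 *		while (process_available_buffers(vq))
 *			;
 *		if (!vhost_enable_notify(&dev, vq))
 *			break;		// ring still empty, wait for a kick
 *		// a buffer arrived between the last check and re-enabling;
 *		// go around again with notifications off
 *	}
 */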
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);

	if (!node)
		return NULL;
	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);
void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");