// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>

#include "vhost.h"
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");
enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
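
/* With VIRTIO_RING_F_EVENT_IDX negotiated, the used_event field lives just
 * past the end of the avail ring and the avail_event field just past the
 * end of the used ring, which is why both macros index ring[vq->num]. */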
INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, static inline, vhost_umem_interval_tree);
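
/* The macro above expands into static inline helpers such as
 * vhost_umem_interval_tree_insert(), _remove(), _iter_first() and
 * _iter_next(); the translation code below uses them to look up the
 * umem node covering a given guest address range. */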
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}
struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}
static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
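
/* Typical backend usage of the poll helpers, as a rough sketch (the exact
 * call sites live in the net/scsi/vsock backends, not in this file):
 *
 *	vhost_poll_init(&poll, handle_kick, EPOLLIN, dev);
 *	vhost_poll_start(&poll, eventfd_file);	// watch the kick eventfd
 *	...					// wakeups queue poll.work
 *	vhost_poll_stop(&poll);			// stop watching the file
 *	vhost_poll_flush(&poll);		// wait out any queued work
 */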
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
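
/* Note the queueing discipline here: the VHOST_WORK_QUEUED bit guarantees
 * each work item sits on the lockless llist at most once, so concurrent
 * producers can call vhost_work_queue() without any spinlock; only the
 * single worker thread ever consumes the list. */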
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);
void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}
#if VHOST_ARCH_CAN_ACCEL_UACCESS
static void vhost_map_unprefetch(struct vhost_map *map)
{
	/* Both the map and its pages array come from kmalloc in
	 * vhost_map_prefetch() below, so free both here. */
	kfree(map->pages);
	kfree(map);
}

static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
{
	struct vhost_map *map[VHOST_NUM_ADDRS];
	int i;

	spin_lock(&vq->mmu_lock);
	for (i = 0; i < VHOST_NUM_ADDRS; i++) {
		map[i] = rcu_dereference_protected(vq->maps[i],
				lockdep_is_held(&vq->mmu_lock));
		if (map[i])
			rcu_assign_pointer(vq->maps[i], NULL);
	}
	spin_unlock(&vq->mmu_lock);

	synchronize_rcu();

	for (i = 0; i < VHOST_NUM_ADDRS; i++)
		if (map[i])
			vhost_map_unprefetch(map[i]);
}
static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
{
	int i;

	vhost_uninit_vq_maps(vq);
	for (i = 0; i < VHOST_NUM_ADDRS; i++)
		vq->uaddrs[i].size = 0;
}
static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
				    unsigned long start,
				    unsigned long end)
{
	if (unlikely(!uaddr->size))
		return false;

	return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
}
static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
				      int index,
				      unsigned long start,
				      unsigned long end)
{
	struct vhost_uaddr *uaddr = &vq->uaddrs[index];
	struct vhost_map *map;
	int i;

	if (!vhost_map_range_overlap(uaddr, start, end))
		return;

	spin_lock(&vq->mmu_lock);
	++vq->invalidate_count;

	map = rcu_dereference_protected(vq->maps[index],
					lockdep_is_held(&vq->mmu_lock));
	if (map) {
		if (uaddr->write) {
			for (i = 0; i < map->npages; i++)
				set_page_dirty(map->pages[i]);
		}
		rcu_assign_pointer(vq->maps[index], NULL);
	}
	spin_unlock(&vq->mmu_lock);

	if (map) {
		synchronize_rcu();
		vhost_map_unprefetch(map);
	}
}
static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
				    int index,
				    unsigned long start,
				    unsigned long end)
{
	if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
		return;

	spin_lock(&vq->mmu_lock);
	--vq->invalidate_count;
	spin_unlock(&vq->mmu_lock);
}
static int vhost_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct vhost_dev *dev = container_of(mn, struct vhost_dev,
					     mmu_notifier);
	int i, j;

	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;

	for (i = 0; i < dev->nvqs; i++) {
		struct vhost_virtqueue *vq = dev->vqs[i];

		for (j = 0; j < VHOST_NUM_ADDRS; j++)
			vhost_invalidate_vq_start(vq, j,
						  range->start,
						  range->end);
	}

	return 0;
}
static void vhost_invalidate_range_end(struct mmu_notifier *mn,
				       const struct mmu_notifier_range *range)
{
	struct vhost_dev *dev = container_of(mn, struct vhost_dev,
					     mmu_notifier);
	int i, j;

	for (i = 0; i < dev->nvqs; i++) {
		struct vhost_virtqueue *vq = dev->vqs[i];

		for (j = 0; j < VHOST_NUM_ADDRS; j++)
			vhost_invalidate_vq_end(vq, j,
						range->start,
						range->end);
	}
}
static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
	.invalidate_range_start = vhost_invalidate_range_start,
	.invalidate_range_end = vhost_invalidate_range_end,
};
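
/* The notifier pair above brackets any host-side change to the guest's
 * mappings: _start drops the kernel-VA fast-path maps and bumps
 * invalidate_count so no new map is prefetched while the range is being
 * invalidated; _end drops the count again once the change is complete. */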
static void vhost_init_maps(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i, j;

	dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		for (j = 0; j < VHOST_NUM_ADDRS; j++)
			RCU_INIT_POINTER(vq->maps[j], NULL);
	}
}
#endif
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	vq->invalidate_count = 0;
	__vhost_vq_meta_reset(vq);
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	vhost_reset_vq_maps(vq);
#endif
}
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}
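
/* The worker parks itself in TASK_INTERRUPTIBLE *before* draining the
 * list, so a producer that adds work and then calls wake_up_process()
 * cannot race with the kthread going to sleep: either the worker sees
 * the new node, or the wakeup sees the worker already runnable. */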
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}
bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
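
/* vhost-net, for example, passes VHOST_NET_PKT_WEIGHT packets and
 * VHOST_NET_WEIGHT bytes as the per-kick limits: once one kick has
 * serviced that much traffic, the handler requeues itself through
 * vhost_poll_queue() and returns, so a single busy queue cannot
 * monopolize the worker thread. */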
static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->avail) +
	       sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->used) +
	       sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	return sizeof(*vq->desc) * num;
}
void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	vhost_init_maps(dev);
#endif

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		spin_lock_init(&vq->mmu_lock);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}
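
/* The trick above runs cgroup_attach_task_all() *inside* the worker
 * thread: the owner cannot attach the kthread from the outside, so it
 * queues a work item that makes the worker attach itself to the owner's
 * cgroups, then flushes to collect the result synchronously. */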
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker); /* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

#if VHOST_ARCH_CAN_ACCEL_UACCESS
	err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
	if (err)
		goto err_mmu_notifier;
#endif

	return 0;

#if VHOST_ARCH_CAN_ACCEL_UACCESS
err_mmu_notifier:
	vhost_dev_free_iovecs(dev);
#endif
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
static void vhost_umem_free(struct vhost_umem *umem,
			    struct vhost_umem_node *node)
{
	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
	list_del(&node->link);
	kfree(node);
	umem->numem--;
}
static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
		vhost_umem_free(umem, node);

	kvfree(umem);
}
static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
#if VHOST_ARCH_CAN_ACCEL_UACCESS
static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
			      int index, unsigned long uaddr,
			      size_t size, bool write)
{
	struct vhost_uaddr *addr = &vq->uaddrs[index];

	addr->uaddr = uaddr;
	addr->size = size;
	addr->write = write;
}
static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
{
	vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
			  (unsigned long)vq->desc,
			  vhost_get_desc_size(vq, vq->num),
			  false);
	vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
			  (unsigned long)vq->avail,
			  vhost_get_avail_size(vq, vq->num),
			  false);
	vhost_setup_uaddr(vq, VHOST_ADDR_USED,
			  (unsigned long)vq->used,
			  vhost_get_used_size(vq, vq->num),
			  true);
}
static int vhost_map_prefetch(struct vhost_virtqueue *vq,
			      int index)
{
	struct vhost_map *map;
	struct vhost_uaddr *uaddr = &vq->uaddrs[index];
	struct page **pages;
	int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
	int npinned;
	void *vaddr, *v;
	int err;
	int i;

	spin_lock(&vq->mmu_lock);

	err = -EFAULT;
	if (vq->invalidate_count)
		goto err;

	err = -ENOMEM;
	map = kmalloc(sizeof(*map), GFP_ATOMIC);
	if (!map)
		goto err;

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
	if (!pages)
		goto err_pages;

	err = -EFAULT;
	npinned = __get_user_pages_fast(uaddr->uaddr, npages,
					uaddr->write, pages);
	if (npinned > 0)
		release_pages(pages, npinned);
	if (npinned != npages)
		goto err_gup;

	for (i = 0; i < npinned; i++)
		if (PageHighMem(pages[i]))
			goto err_gup;

	vaddr = v = page_address(pages[0]);

	/* For simplicity, fallback to userspace address if VA is not
	 * contiguous.
	 */
	for (i = 1; i < npinned; i++) {
		v += PAGE_SIZE;
		if (v != page_address(pages[i]))
			goto err_gup;
	}

	map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
	map->npages = npages;
	map->pages = pages;

	rcu_assign_pointer(vq->maps[index], map);
	/* No need for a synchronize_rcu(). This function should be
	 * called by dev->worker so we are serialized with all
	 * readers.
	 */
	spin_unlock(&vq->mmu_lock);

	return 0;

err_gup:
	kfree(pages);
err_pages:
	kfree(map);
err:
	spin_unlock(&vq->mmu_lock);
	return err;
}
#endif
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	vhost_umem_clean(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm) {
#if VHOST_ARCH_CAN_ACCEL_UACCESS
		mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
#endif
		mmput(dev->mm);
	}
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	for (i = 0; i < dev->nvqs; i++)
		vhost_uninit_vq_maps(dev->vqs[i]);
#endif
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}
/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
				int log_all)
{
	struct vhost_umem_node *node;

	if (!umem)
		return false;

	list_for_each_entry(node, &umem->umem_list, link) {
		unsigned long a = node->userspace_addr;

		if (vhost_overflow(node->userspace_addr, node->size))
			return false;

		if (!access_ok((void __user *)a,
			       node->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   node->start,
						   node->size))
			return false;
	}
	return true;
}
static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_umem_node *node = vq->meta_iotlb[type];

	if (!node)
		return NULL;

	return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the whole vq
		 * can be accessed through the iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}
static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}
/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * can be accessed through the iotlb. So -EAGAIN
 * should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}
#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret = -EFAULT; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT;	\
	} \
	ret; \
})
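
/* In other words: with no IOTLB the target pointer is a plain userspace
 * address and __put_user() is used directly; with an IOTLB the guest
 * address is first translated (meta-cache fast path, translate_desc()
 * slow path) and the store goes to the translated address instead. */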
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_used *used;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
		if (likely(map)) {
			used = map->addr;
			*((__virtio16 *)&used->ring[vq->num]) =
				cpu_to_vhost16(vq, vq->avail_idx);
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}
static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_used *used;
	size_t size;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
		if (likely(map)) {
			used = map->addr;
			size = count * sizeof(*head);
			memcpy(used->ring + idx, head, size);
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_used *used;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
		if (likely(map)) {
			used = map->addr;
			used->flags = cpu_to_vhost16(vq, vq->used_flags);
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_used *used;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
		if (likely(map)) {
			used = map->addr;
			used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}
#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
				      __virtio16 *idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_avail *avail;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
		if (likely(map)) {
			avail = map->addr;
			*idx = avail->idx;
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_get_avail(vq, *idx, &vq->avail->idx);
}
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_avail *avail;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
		if (likely(map)) {
			avail = map->addr;
			*head = avail->ring[idx & (vq->num - 1)];
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_avail *avail;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
		if (likely(map)) {
			avail = map->addr;
			*flags = avail->flags;
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}
static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_avail *avail;

	if (!vq->iotlb) {
		rcu_read_lock();
		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
		if (likely(map)) {
			avail = map->addr;
			*event = (__virtio16)avail->ring[vq->num];
			rcu_read_unlock();
			return 0;
		}
		rcu_read_unlock();
	}
#endif

	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}
static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_used *used;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
		if (likely(map)) {
			used = map->addr;
			*idx = used->idx;
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_get_used(vq, *idx, &vq->used->idx);
}
static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
#if VHOST_ARCH_CAN_ACCEL_UACCESS
	struct vhost_map *map;
	struct vring_desc *d;

	if (!vq->iotlb) {
		rcu_read_lock();

		map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
		if (likely(map)) {
			d = map->addr;
			*desc = *(d + idx);
			rcu_read_unlock();
			return 0;
		}

		rcu_read_unlock();
	}
#endif

	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
static int vhost_new_umem_range(struct vhost_umem *umem,
				u64 start, u64 size, u64 end,
				u64 userspace_addr, int perm)
{
	struct vhost_umem_node *tmp, *node;

	if (!size)
		return -EFAULT;

	node = kmalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return -ENOMEM;

	if (umem->numem == max_iotlb_entries) {
		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
		vhost_umem_free(umem, tmp);
	}

	node->start = start;
	node->size = size;
	node->last = end;
	node->userspace_addr = userspace_addr;
	node->perm = perm;
	INIT_LIST_HEAD(&node->link);
	list_add_tail(&node->link, &umem->umem_list);
	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
	umem->numem++;

	return 0;
}
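
/* When the IOTLB is full, the oldest entry is evicted first: new nodes
 * are appended at the tail of umem_list, so the head is the oldest --
 * effectively a FIFO replacement policy bounded by the
 * max_iotlb_entries module parameter. */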
static void vhost_del_umem_range(struct vhost_umem *umem,
				 u64 start, u64 end)
{
	struct vhost_umem_node *node;

	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   start, end)))
		vhost_umem_free(umem, node);
}
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}
static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok((void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok((void __user *)a, size))
		return false;
	return true;
}
static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
					 msg->iova + msg->size - 1,
					 msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_del_umem_range(dev->iotlb, msg->iova,
				     msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There maybe a hole after type for V1 message type,
		 * so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}
	if (vhost_process_iotlb_msg(dev, &msg)) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);
done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}
static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 struct vring_desc __user *desc,
			 struct vring_avail __user *avail,
			 struct vring_used __user *used)
{
	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
	       access_ok(used, vhost_get_used_size(vq, num));
}
static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_umem_node *node,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(node->perm & access))
		vq->meta_iotlb[type] = node;
}
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_umem_node *node;
	struct vhost_umem *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr,
							   last);
		if (node == NULL || node->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(node->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = node->size - addr + node->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, node, type);

		s += size;
		addr += size;
	}

	return true;
}
#if VHOST_ARCH_CAN_ACCEL_UACCESS
static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
{
	struct vhost_map __rcu *map;
	int i;

	for (i = 0; i < VHOST_NUM_ADDRS; i++) {
		rcu_read_lock();
		map = rcu_dereference(vq->maps[i]);
		rcu_read_unlock();
		if (unlikely(!map))
			vhost_map_prefetch(vq, i);
	}
}
#endif
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	if (!vq->iotlb) {
#if VHOST_ARCH_CAN_ACCEL_UACCESS
		vhost_vq_map_prefetch(vq);
#endif
		return 1;
	}

	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					vhost_get_used_size(vq, vq->num)));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	/* Access validation occurs at prefetch time with IOTLB */
	if (vq->iotlb)
		return true;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static struct vhost_umem *vhost_umem_alloc(void)
{
	struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);

	if (!umem)
		return NULL;

	umem->umem_tree = RB_ROOT_CACHED;
	umem->numem = 0;
	INIT_LIST_HEAD(&umem->umem_list);

	return umem;
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_umem *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
			  GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = vhost_umem_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_new_umem_range(newumem,
					 region->guest_phys_addr,
					 region->memory_size,
					 region->guest_phys_addr +
					 region->memory_size - 1,
					 region->userspace_addr,
					 VHOST_ACCESS_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_umem_clean(oldumem);
	return 0;

err:
	vhost_umem_clean(newumem);
	kvfree(newmem);
	return -EFAULT;
}
static long vhost_vring_set_num(struct vhost_dev *d,
				struct vhost_virtqueue *vq,
				void __user *argp)
{
	struct vhost_vring_state s;

	/* Resizing ring with an active backend?
	 * You don't want to do that. */
	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof s))
		return -EFAULT;

	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
		return -EINVAL;
	vq->num = s.num;

	return 0;
}
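
/* The (s.num & (s.num - 1)) test rejects any size that is not a power of
 * two: e.g. 256 & 255 == 0 (accepted) while 320 & 319 == 256 (rejected).
 * Split-ring sizes must be powers of two so that "idx & (num - 1)" works
 * as the ring index everywhere below. */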
static long vhost_vring_set_addr(struct vhost_dev *d,
				 struct vhost_virtqueue *vq,
				 void __user *argp)
{
	struct vhost_vring_addr a;

	if (copy_from_user(&a, argp, sizeof a))
		return -EFAULT;
	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
		return -EOPNOTSUPP;

	/* For 32bit, verify that the top 32bits of the user
	   data are set to zero. */
	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
		return -EFAULT;

	/* Make sure it's safe to cast pointers to vring types. */
	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
		return -EINVAL;

	/* We only verify access here if backend is configured.
	 * If it is not, we don't as size might not have been setup.
	 * We will verify when backend is configured. */
	if (vq->private_data) {
		if (!vq_access_ok(vq, vq->num,
			(void __user *)(unsigned long)a.desc_user_addr,
			(void __user *)(unsigned long)a.avail_user_addr,
			(void __user *)(unsigned long)a.used_user_addr))
			return -EINVAL;

		/* Also validate log access for used ring if enabled. */
		if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
		    !log_access_ok(vq->log_base, a.log_guest_addr,
				   sizeof *vq->used +
				   vq->num * sizeof *vq->used->ring))
			return -EINVAL;
	}

	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
	vq->log_addr = a.log_guest_addr;
	vq->used = (void __user *)(unsigned long)a.used_user_addr;

	return 0;
}
static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{
	long r;

	mutex_lock(&vq->mutex);

#if VHOST_ARCH_CAN_ACCEL_UACCESS
	/* Unregister the MMU notifier so the invalidation callback
	 * can access vq->uaddrs[] without holding a lock.
	 */
	if (d->mm)
		mmu_notifier_unregister(&d->mmu_notifier, d->mm);

	vhost_uninit_vq_maps(vq);
#endif

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		r = vhost_vring_set_num(d, vq, argp);
		break;
	case VHOST_SET_VRING_ADDR:
		r = vhost_vring_set_addr(d, vq, argp);
		break;
	default:
		BUG();
	}

#if VHOST_ARCH_CAN_ACCEL_UACCESS
	vhost_setup_vq_uaddr(vq);

	if (d->mm)
		mmu_notifier_register(&d->mmu_notifier, d->mm);
#endif

	mutex_unlock(&vq->mutex);

	return r;
}
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, d->nvqs);
	vq = d->vqs[idx];

	if (ioctl == VHOST_SET_VRING_NUM ||
	    ioctl == VHOST_SET_VRING_ADDR) {
		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
	}

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->call_ctx);
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->error_ctx);
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_umem *niotlb, *oiotlb;
	int i;

	niotlb = vhost_umem_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->iotlb = niotlb;
		__vhost_vq_meta_reset(vq);
		mutex_unlock(&vq->mutex);
	}

	vhost_umem_clean(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct eventfd_ctx *ctx;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, d->log_ctx);
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
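
/* Worked example of the bitmap math above: for write_address 0x5000 with
 * VHOST_PAGE_SIZE 4096, write_page is 5, so the dirty bit lives in byte
 * log_base + 5/8 = log_base + 0 at bit position 5 % 8 = 5 -- one bit per
 * guest page, eight pages tracked per byte of the log. */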
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
	struct vhost_umem *umem = vq->umem;
	struct vhost_umem_node *u;
	u64 start, end, l, min;
	int r;
	bool hit = false;

	while (len) {
		min = len;
		/* More than one GPAs can be mapped into a single HVA. So
		 * iterate all possible umems here to be safe.
		 */
		list_for_each_entry(u, &umem->umem_list, link) {
			if (u->userspace_addr > hva - 1 + len ||
			    u->userspace_addr - 1 + u->size < hva)
				continue;
			start = max(u->userspace_addr, hva);
			end = min(u->userspace_addr - 1 + u->size,
				  hva - 1 + len);
			l = end - start + 1;
			r = log_write(vq->log_base,
				      u->start + start - u->userspace_addr,
				      l);
			if (r < 0)
				return r;
			hit = true;
			min = min(l, min);
		}

		if (!hit)
			return -EFAULT;

		len -= min;
		hva += min;
	}

	return 0;
}
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
	struct iovec iov[64];
	int i, ret;

	if (!vq->iotlb)
		return log_write(vq->log_base, vq->log_addr + used_offset, len);

	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
			     len, iov, 64, VHOST_ACCESS_WO);
	if (ret < 0)
		return ret;

	for (i = 0; i < ret; i++) {
		ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
				    iov[i].iov_len);
		if (ret)
			return ret;
	}

	return 0;
}
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len, struct iovec *iov, int count)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();

	if (vq->iotlb) {
		for (i = 0; i < count; i++) {
			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
					  iov[i].iov_len);
			if (r < 0)
				return r;
		}
		return 0;
	}

	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_used_flags(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (vhost_put_avail_event(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used_idx(vq, &last_used_idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_umem_node *node;
	struct vhost_dev *dev = vq->dev;
	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr, addr + len - 1);
		if (node == NULL || node->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(node->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = node->size - addr + node->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(node->userspace_addr + addr - node->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}
/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
	return next;
}
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
					ret, i);
			return ret;
		}

		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}
/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;

	if (vq->avail_idx == vq->last_avail_idx) {
		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
			vq_err(vq, "Failed to access avail idx at %p\n",
			       &vq->avail->idx);
			return -EFAULT;
		}
		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
			vq_err(vq, "Guest moved used index from %u to %u",
			       last_avail_idx, vq->avail_idx);
			return -EFAULT;
		}

		/* If there's nothing new since last we looked, return
		 * invalid.
		 */
		if (vq->avail_idx == last_avail_idx)
			return vq->num;

		/* Only get avail ring entries after they have been
		 * exposed by guest.
		 */
		smp_rmb();
	}

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are neither input nor output descriptors. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_get_desc(vq, &desc, i);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
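
/* A minimal sketch (not built; for illustration only) of how a device
 * backend typically drives the helpers above and below: fetch available
 * buffers, process them, then report them used.  The function name, the
 * zero 'len' and the buffer processing are hypothetical placeholders;
 * see drivers/vhost/net.c for a real caller. */
#if 0
static void example_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	struct iovec iov[UIO_MAXIOV];
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, iov, ARRAY_SIZE(iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* access failure */
		if (head == vq->num) {
			/* Ring drained: re-arm notifications, then re-check
			 * in case a buffer slipped in meanwhile. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* ... consume 'out' output iovecs, fill 'in' input iovecs ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}
#endif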

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
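
/* Illustrative fragment (hypothetical, not built): a backend that fetched a
 * buffer but cannot service it yet (e.g. no room in the transport) hands it
 * back by rewinding the avail index, so the same head is returned next time.
 * backend_has_room() is a made-up placeholder. */
#if 0
	head = vhost_get_vq_desc(vq, iov, ARRAY_SIZE(iov), &out, &in, NULL, NULL);
	if (head >= 0 && head != vq->num && !backend_has_room()) {
		vhost_discard_vq_desc(vq, 1);	/* retry this buffer later */
		break;
	}
#endif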

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (vhost_put_used(vq, heads, start, count)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_used(vq, ((void __user *)used - (void __user *)vq->used),
			 count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
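
/* Worked example of the wrap check above, in u16 arithmetic: with
 * old = 0xfff0 and count = 0x20, new = 0x0010.  A cached signalled_used of
 * 0x0005 lies inside the window (old, new] this batch just covered:
 * (u16)(new - signalled_used) = 0x000b < (u16)(new - old) = 0x0020, so the
 * cache is invalidated and the next vhost_notify() signals unconditionally.
 * A value of 0xff00 is outside the window: 0x0110 >= 0x0020, so it stays
 * valid.  A hypothetical self-check (not built):
 */
#if 0
static bool example_stepped_over(u16 old, u16 new, u16 signalled)
{
	return (u16)(new - signalled) < (u16)(new - old);
}
/* example_stepped_over(0xfff0, 0x0010, 0x0005) == true:  invalidate
 * example_stepped_over(0xfff0, 0x0010, 0xff00) == false: keep       */
#endif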

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_used_idx(vq)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure used idx is seen before log. */
		smp_wmb();
		/* Log used index update. */
		log_used(vq, offsetof(struct vring_used, idx),
			 sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);

static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_avail_flags(vq, &flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (vhost_get_used_event(vq, &event)) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
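
/* Worked example for the event-idx path above: vring_need_event(event, new,
 * old) is (u16)(new - event - 1) < (u16)(new - old), i.e. "did this batch of
 * used entries step over the guest's used_event threshold?".  With old = 10,
 * new = 15 and used_event = 12: (15 - 12 - 1) = 2 < (15 - 10) = 5, so we
 * signal.  With used_event = 20: (u16)(15 - 20 - 1) = 0xfffa >= 5, so the
 * guest has asked not to be interrupted yet.  E.g. (hypothetical checks,
 * not built):
 */
#if 0
	BUG_ON(!vring_need_event(12, 15, 10));	/* threshold crossed: signal  */
	BUG_ON(vring_need_event(20, 15, 10));	/* not reached yet: stay quiet */
#endif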

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest, telling them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
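
/* Illustrative sketch (hypothetical, not built): completing buffers in
 * batches.  Heads returned by earlier vhost_get_vq_desc() calls are
 * accumulated and reported together, amortizing the used-index update and
 * the (at most one) eventfd signal.  The batch size of 64 and the 'done'
 * bookkeeping are placeholders. */
#if 0
	struct vring_used_elem heads[64];
	unsigned int done = 0;

	/* for each buffer processed: */
	heads[done].id = cpu_to_vhost32(vq, head);
	heads[done].len = cpu_to_vhost32(vq, len);
	++done;

	/* once per batch: */
	vhost_add_used_and_signal_n(dev, vq, heads, done);
#endif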

/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (vq->avail_idx != vq->last_avail_idx)
		return false;

	r = vhost_get_avail_idx(vq, &avail_idx);
	if (unlikely(r))
		return false;
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
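
/* Illustrative fragment: a worker can busy-poll briefly with
 * vhost_vq_avail_empty() before paying for an exit/notification round trip.
 * The endtime bookkeeping and vhost_can_busy_poll() are hypothetical here
 * (drivers/vhost/net.c implements this pattern for networking). */
#if 0
	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;
	while (vhost_can_busy_poll(endtime) &&
	       vhost_vq_avail_empty(dev, vq))
		cpu_relax();
	preempt_enable();
#endif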

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = vhost_get_avail_idx(vq, &avail_idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);

/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
	if (!node)
		return NULL;

	/* Make sure all padding within the structure is initialized. */
	memset(&node->msg, 0, sizeof node->msg);
	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);

void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);

struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
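
/* Illustrative sketch (simplified, not built): how these message helpers
 * carry IOTLB traffic.  A translation miss enqueues a message that userspace
 * reads from the vhost chardev; userspace replies with an IOTLB update,
 * unblocking the worker.  dev->read_list matches the field used elsewhere in
 * this file; the exact message layout varies by API version. */
#if 0
	/* kernel side, on a translation miss: */
	struct vhost_msg_node *node = vhost_new_msg(vq, VHOST_IOTLB_MSG);
	if (node) {
		node->msg.iotlb.type = VHOST_IOTLB_MISS;
		node->msg.iotlb.iova = iova;
		node->msg.iotlb.perm = access;
		vhost_enqueue_msg(dev, &dev->read_list, node);
	}

	/* chardev read side: */
	node = vhost_dequeue_msg(dev, &dev->read_list);
#endif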

static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");