// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/vhost_task.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
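
/*
 * Layout note (added for illustration, not from the original source): with
 * VIRTIO_RING_F_EVENT_IDX negotiated, the "used event" lives in the two
 * bytes immediately after the available ring and the "avail event" in the
 * two bytes after the used ring, which is exactly what the two macros
 * above compute:
 *
 *	struct vring_avail { __virtio16 flags, idx, ring[num]; }
 *		followed by __virtio16 used_event;
 *	struct vring_used  { __virtio16 flags, idx; vring_used_elem ring[num]; }
 *		followed by __virtio16 avail_event;
 */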
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
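
/*
 * Hedged sketch of how userspace would drive the endianness ioctls above
 * (illustrative, not part of this file; fd setup and error handling are
 * assumed). This only succeeds before the backend is set and only when
 * the kernel was built with CONFIG_VHOST_CROSS_ENDIAN_LEGACY:
 *
 *	struct vhost_vring_state s = {
 *		.index = 0,
 *		.num = VHOST_VRING_BIG_ENDIAN,
 *	};
 *	if (ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s))
 *		perror("VHOST_SET_VRING_ENDIAN");
 */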
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
	struct vhost_work *work = &poll->work;

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	if (!poll->dev->use_worker)
		work->fn(work);
	else
		vhost_poll_queue(poll);

	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;
	poll->vq = vq;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
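
/*
 * Typical lifecycle of a vhost_poll, as a hedged sketch (the kick_file
 * name is illustrative): vhost_poll_init() once at setup,
 * vhost_poll_start() when an eventfd is installed, vhost_poll_stop()
 * plus a flush on teardown:
 *
 *	vhost_poll_init(&vq->poll, handle_kick, EPOLLIN, dev, vq);
 *	r = vhost_poll_start(&vq->poll, kick_file);	// begin polling
 *	...
 *	vhost_poll_stop(&vq->poll);	// detach from the waitqueue
 *	vhost_dev_flush(dev);		// drain any queued work
 */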
static void vhost_worker_queue(struct vhost_worker *worker,
			       struct vhost_work *work)
{
	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &worker->work_list);
		vhost_task_wake(worker->vtsk);
	}
}

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{
	struct vhost_worker *worker;
	bool ret = false;

	rcu_read_lock();
	worker = rcu_dereference(vq->worker);
	if (worker) {
		ret = true;
		vhost_worker_queue(worker, work);
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
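
/*
 * Hedged usage sketch for the work-queueing API (the callback name is
 * made up): initialize a vhost_work once, then queue it on a vq's worker
 * from any context. Queueing is idempotent while VHOST_WORK_QUEUED is
 * set, so a storm of wakeups results in a single execution.
 *
 *	static void my_handler(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		...
 *	}
 *
 *	vhost_work_init(&vq->poll.work, my_handler);
 *	vhost_vq_work_queue(vq, &vq->poll.work);  // false if no worker attached
 */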
/**
 * __vhost_worker_flush - flush a worker
 * @worker: worker to flush
 *
 * The worker's mutex must be held.
 */
static void __vhost_worker_flush(struct vhost_worker *worker)
{
	struct vhost_flush_struct flush;

	if (!worker->attachment_cnt || worker->killed)
		return;

	init_completion(&flush.wait_event);
	vhost_work_init(&flush.work, vhost_flush_work);

	vhost_worker_queue(worker, &flush.work);
	/*
	 * Drop mutex in case our worker is killed and it needs to take the
	 * mutex to force cleanup.
	 */
	mutex_unlock(&worker->mutex);
	wait_for_completion(&flush.wait_event);
	mutex_lock(&worker->mutex);
}

static void vhost_worker_flush(struct vhost_worker *worker)
{
	mutex_lock(&worker->mutex);
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);
}

void vhost_dev_flush(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	unsigned long i;

	xa_for_each(&dev->worker_xa, i, worker)
		vhost_worker_flush(worker);
}
EXPORT_SYMBOL_GPL(vhost_dev_flush);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{
	struct vhost_worker *worker;
	bool has_work = false;

	rcu_read_lock();
	worker = rcu_dereference(vq->worker);
	if (worker && !llist_empty(&worker->work_list))
		has_work = true;
	rcu_read_unlock();

	return has_work;
}
EXPORT_SYMBOL_GPL(vhost_vq_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
	call_ctx->ctx = NULL;
	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}

bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{
	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
}
EXPORT_SYMBOL_GPL(vhost_vq_is_setup);

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->log_ctx = NULL;
	vhost_disable_cross_endian(vq);
	vhost_reset_is_le(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	rcu_assign_pointer(vq->worker, NULL);
	vhost_vring_call_reset(&vq->call_ctx);
	__vhost_vq_meta_reset(vq);
}
static bool vhost_run_work_list(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;

	node = llist_del_all(&worker->work_list);
	if (node) {
		__set_current_state(TASK_RUNNING);

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			kcov_remote_start_common(worker->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			cond_resched();
		}
	}

	return !!node;
}

static void vhost_worker_killed(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_dev *dev = worker->dev;
	struct vhost_virtqueue *vq;
	int i, attach_cnt = 0;

	mutex_lock(&worker->mutex);
	worker->killed = true;

	for (i = 0; i < dev->nvqs; i++) {
		vq = dev->vqs[i];

		mutex_lock(&vq->mutex);
		if (worker ==
		    rcu_dereference_check(vq->worker,
					  lockdep_is_held(&vq->mutex))) {
			rcu_assign_pointer(vq->worker, NULL);
			attach_cnt++;
		}
		mutex_unlock(&vq->mutex);
	}

	worker->attachment_cnt -= attach_cnt;
	if (attach_cnt)
		synchronize_rcu();
	/*
	 * Finish vhost_worker_flush calls and any other works that snuck in
	 * before the synchronize_rcu.
	 */
	vhost_run_work_list(worker);
	mutex_unlock(&worker->mutex);
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}
bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
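
/*
 * Hedged sketch of how drivers use the weight check (the loop body is
 * illustrative): a handler processes buffers until it exceeds its packet
 * or byte budget, at which point vhost_exceeds_weight() has already
 * requeued the poll work and the handler simply returns.
 *
 *	total_len = 0;
 *	do {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		...
 *		total_len += len;
 *	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 */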
static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return size_add(struct_size(vq->avail, ring, num), event);
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return size_add(struct_size(vq->used, ring, num), event);
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	return sizeof(*vq->desc) * num;
}
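
/*
 * Worked example (illustrative): for a split ring with num = 256 and
 * VIRTIO_RING_F_EVENT_IDX negotiated, the helpers above give
 *
 *	desc:  256 * 16 bytes                 = 4096 bytes
 *	avail: 4 + 256 * 2 + 2 (used_event)   =  518 bytes
 *	used:  4 + 256 * 8 + 2 (avail_event)  = 2054 bytes
 *
 * These are the spans that vq_access_ok() and vq_meta_prefetch() must be
 * able to validate.
 */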
void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg))
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	dev->use_worker = use_worker;
	dev->msg_handler = msg_handler;
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);
	xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev, vq);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
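
/*
 * Hedged sketch of a driver calling vhost_dev_init() (values mimic
 * vhost-net but are illustrative; the handler names are assumptions):
 *
 *	struct vhost_virtqueue *vqs[2] = { &n->vqs[0].vq, &n->vqs[1].vq };
 *
 *	vqs[0]->handle_kick = handle_rx_kick;
 *	vqs[1]->handle_kick = handle_tx_kick;
 *	vhost_dev_init(&n->dev, vqs, 2, UIO_MAXIOV + 64,
 *		       VHOST_NET_WEIGHT, VHOST_NET_PKT_WEIGHT, true, NULL);
 *
 * handle_kick must be set before this call so vhost_poll_init() runs for
 * the vq.
 */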
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

static void vhost_attach_mm(struct vhost_dev *dev)
{
	/* No owner, become one */
	if (dev->use_worker) {
		dev->mm = get_task_mm(current);
	} else {
		/* vDPA device does not use a worker thread, so there's
		 * no need to hold the address space for mm. This helps
		 * to avoid a deadlock in the case of mmap(), which may
		 * hold the refcnt of the file and depend on the release
		 * method to remove the vma.
		 */
		dev->mm = current->mm;
		mmgrab(dev->mm);
	}
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
	if (!dev->mm)
		return;

	if (dev->use_worker)
		mmput(dev->mm);
	else
		mmdrop(dev->mm);

	dev->mm = NULL;
}
static void vhost_worker_destroy(struct vhost_dev *dev,
				 struct vhost_worker *worker)
{
	if (!worker)
		return;

	WARN_ON(!llist_empty(&worker->work_list));
	xa_erase(&dev->worker_xa, worker->id);
	vhost_task_stop(worker->vtsk);
	kfree(worker);
}

static void vhost_workers_free(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	unsigned long i;

	if (!dev->use_worker)
		return;

	for (i = 0; i < dev->nvqs; i++)
		rcu_assign_pointer(dev->vqs[i]->worker, NULL);
	/*
	 * Free the default worker we created and cleanup workers userspace
	 * created but couldn't clean up (it forgot or crashed).
	 */
	xa_for_each(&dev->worker_xa, i, worker)
		vhost_worker_destroy(dev, worker);
	xa_destroy(&dev->worker_xa);
}

static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	struct vhost_task *vtsk;
	char name[TASK_COMM_LEN];
	int ret;
	u32 id;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
	if (!worker)
		return NULL;

	worker->dev = dev;
	snprintf(name, sizeof(name), "vhost-%d", current->pid);

	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
				 worker, name);
	if (IS_ERR(vtsk))
		goto free_worker;

	mutex_init(&worker->mutex);
	init_llist_head(&worker->work_list);
	worker->kcov_handle = kcov_common_handle();
	worker->vtsk = vtsk;

	vhost_task_start(vtsk);

	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
	if (ret < 0)
		goto stop_worker;
	worker->id = id;

	return worker;

stop_worker:
	vhost_task_stop(vtsk);
free_worker:
	kfree(worker);
	return NULL;
}
/* Caller must have device mutex */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				     struct vhost_worker *worker)
{
	struct vhost_worker *old_worker;

	mutex_lock(&worker->mutex);
	if (worker->killed) {
		mutex_unlock(&worker->mutex);
		return;
	}

	mutex_lock(&vq->mutex);

	old_worker = rcu_dereference_check(vq->worker,
					   lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->worker, worker);
	worker->attachment_cnt++;

	if (!old_worker) {
		mutex_unlock(&vq->mutex);
		mutex_unlock(&worker->mutex);
		return;
	}
	mutex_unlock(&vq->mutex);
	mutex_unlock(&worker->mutex);

	/*
	 * Take the worker mutex to make sure we see the work queued from
	 * device wide flushes which don't use RCU for execution.
	 */
	mutex_lock(&old_worker->mutex);
	if (old_worker->killed) {
		mutex_unlock(&old_worker->mutex);
		return;
	}

	/*
	 * We don't want to call synchronize_rcu for every vq during setup
	 * because it will slow down VM startup. If we haven't done
	 * VHOST_SET_VRING_KICK and not done the driver specific
	 * SET_ENDPOINT/RUNNING then we can skip the sync since there will
	 * not be any works queued for scsi and net.
	 */
	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq) && !vq->kick) {
		mutex_unlock(&vq->mutex);

		old_worker->attachment_cnt--;
		mutex_unlock(&old_worker->mutex);
		/*
		 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
		 * Warn if it adds support for multiple workers but forgets to
		 * handle the early queueing case.
		 */
		WARN_ON(!old_worker->attachment_cnt &&
			!llist_empty(&old_worker->work_list));
		return;
	}
	mutex_unlock(&vq->mutex);

	/* Make sure new vq queue/flush/poll calls see the new worker */
	synchronize_rcu();
	/* Make sure whatever was queued gets run */
	__vhost_worker_flush(old_worker);
	old_worker->attachment_cnt--;
	mutex_unlock(&old_worker->mutex);
}
/* Caller must have device mutex */
static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				  struct vhost_vring_worker *info)
{
	unsigned long index = info->worker_id;
	struct vhost_dev *dev = vq->dev;
	struct vhost_worker *worker;

	if (!dev->use_worker)
		return -EINVAL;

	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
	if (!worker || worker->id != info->worker_id)
		return -ENODEV;

	__vhost_vq_attach_worker(vq, worker);
	return 0;
}

/* Caller must have device mutex */
static int vhost_new_worker(struct vhost_dev *dev,
			    struct vhost_worker_state *info)
{
	struct vhost_worker *worker;

	worker = vhost_worker_create(dev);
	if (!worker)
		return -ENOMEM;

	info->worker_id = worker->id;
	return 0;
}

/* Caller must have device mutex */
static int vhost_free_worker(struct vhost_dev *dev,
			     struct vhost_worker_state *info)
{
	unsigned long index = info->worker_id;
	struct vhost_worker *worker;

	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
	if (!worker || worker->id != info->worker_id)
		return -ENODEV;

	mutex_lock(&worker->mutex);
	if (worker->attachment_cnt || worker->killed) {
		mutex_unlock(&worker->mutex);
		return -EBUSY;
	}
	/*
	 * A flush might have raced and snuck in before attachment_cnt was set
	 * to zero. Make sure flushes are flushed from the queue before
	 * freeing.
	 */
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);

	vhost_worker_destroy(dev, worker);
	return 0;
}

static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
				  struct vhost_virtqueue **vq, u32 *id)
{
	u32 __user *idxp = argp;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;

	if (idx >= dev->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, dev->nvqs);

	*vq = dev->vqs[idx];
	*id = idx;
	return 0;
}
/* Caller must have device mutex */
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp)
{
	struct vhost_vring_worker ring_worker;
	struct vhost_worker_state state;
	struct vhost_worker *worker;
	struct vhost_virtqueue *vq;
	long ret;
	u32 idx;

	if (!dev->use_worker)
		return -EINVAL;

	if (!vhost_dev_has_owner(dev))
		return -EINVAL;

	ret = vhost_dev_check_owner(dev);
	if (ret)
		return ret;

	switch (ioctl) {
	/* dev worker ioctls */
	case VHOST_NEW_WORKER:
		ret = vhost_new_worker(dev, &state);
		if (!ret && copy_to_user(argp, &state, sizeof(state)))
			ret = -EFAULT;
		return ret;
	case VHOST_FREE_WORKER:
		if (copy_from_user(&state, argp, sizeof(state)))
			return -EFAULT;
		return vhost_free_worker(dev, &state);
	/* vring worker ioctls */
	case VHOST_ATTACH_VRING_WORKER:
	case VHOST_GET_VRING_WORKER:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
	if (ret)
		return ret;

	switch (ioctl) {
	case VHOST_ATTACH_VRING_WORKER:
		if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
			ret = -EFAULT;
			break;
		}

		ret = vhost_vq_attach_worker(vq, &ring_worker);
		break;
	case VHOST_GET_VRING_WORKER:
		worker = rcu_dereference_check(vq->worker,
					       lockdep_is_held(&dev->mutex));
		if (!worker) {
			ret = -EINVAL;
			break;
		}

		ring_worker.index = idx;
		ring_worker.worker_id = worker->id;

		if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_worker_ioctl);
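
/*
 * Hedged userspace sketch for the worker ioctls above (error handling
 * elided): create a second worker and attach it to vring 1 so that
 * vring 0 and vring 1 are serviced by different vhost tasks.
 *
 *	struct vhost_worker_state state = {};
 *	struct vhost_vring_worker w;
 *
 *	ioctl(vhost_fd, VHOST_NEW_WORKER, &state);
 *	w.index = 1;
 *	w.worker_id = state.worker_id;
 *	ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &w);
 *
 * VHOST_FREE_WORKER only succeeds once nothing is attached to the worker.
 */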
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	int err, i;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	vhost_attach_mm(dev);

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_worker;

	if (dev->use_worker) {
		/*
		 * This should be done last, because vsock can queue work
		 * before VHOST_SET_OWNER so it simplifies the failure path
		 * below since we don't have to worry about vsock queueing
		 * while we free the worker.
		 */
		worker = vhost_worker_create(dev);
		if (!worker) {
			err = -ENOMEM;
			goto err_iovecs;
		}

		for (i = 0; i < dev->nvqs; i++)
			__vhost_vq_attach_worker(dev->vqs[i], worker);
	}

	return 0;

err_iovecs:
	vhost_dev_free_iovecs(dev);
err_worker:
	vhost_detach_mm(dev);
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

static struct vhost_iotlb *iotlb_alloc(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
	return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
			vhost_poll_stop(&dev->vqs[i]->poll);
	}

	vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
EXPORT_SYMBOL_GPL(vhost_clear_msg);

void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx.ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_iotlb_free(dev->umem);
	dev->umem = NULL;
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	vhost_workers_free(dev);
	vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
	if (uaddr > ULONG_MAX || size > ULONG_MAX)
		return true;

	if (!size)
		return false;

	return uaddr > ULONG_MAX - size + 1;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
				int log_all)
{
	struct vhost_iotlb_map *map;

	if (!umem)
		return false;

	list_for_each_entry(map, &umem->list, link) {
		unsigned long a = map->addr;

		if (vhost_overflow(map->addr, map->size))
			return false;

		if (!access_ok((void __user *)a, map->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   map->start,
						   map->size))
			return false;
	}
	return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

	if (!map)
		return NULL;

	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory can be accessed through the iotlb. So
		 * -EAGAIN should not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * memory can be accessed through the iotlb. So
		 * -EAGAIN should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
			       from, (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}
static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
		       addr, (unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr %p size 0x%llx\n",
		       addr, (unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * memory can be accessed through the iotlb. So
 * -EAGAIN should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT;	\
	} \
	ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;

	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;

	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
{
	__virtio16 idx;
	int r;

	r = vhost_get_avail(vq, idx, &vq->avail->idx);
	if (unlikely(r < 0)) {
		vq_err(vq, "Failed to access available index at %p (%d)\n",
		       &vq->avail->idx, r);
		return r;
	}

	/* Check it isn't doing very strange things with available indexes */
	vq->avail_idx = vhost16_to_cpu(vq, idx);
	if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) {
		vq_err(vq, "Invalid available index change from %u to %u",
		       vq->last_avail_idx, vq->avail_idx);
		return -EINVAL;
	}

	/* We're done if there is nothing new */
	if (vq->avail_idx == vq->last_avail_idx)
		return 0;

	/*
	 * We updated vq->avail_idx so we need a memory barrier between
	 * the index read above and the caller reading avail ring entries.
	 */
	smp_rmb();
	return 1;
}
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
	return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok((void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok((void __user *)a, size))
		return false;
	return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	if (asid != 0)
		return -EINVAL;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
					  msg->iova + msg->size - 1,
					  msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_iotlb_del_range(dev->iotlb, msg->iova,
				      msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;
	u32 asid = 0;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There may be a hole after type for V1 message type,
		 * so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		if (vhost_backend_has_feature(dev->vqs[0],
					      VHOST_BACKEND_F_IOTLB_ASID)) {
			ret = copy_from_iter(&asid, sizeof(asid), from);
			if (ret != sizeof(asid)) {
				ret = -EINVAL;
				goto done;
			}
			offset = 0;
		} else
			offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}

	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
		ret = -EINVAL;
		goto done;
	}

	if (dev->msg_handler)
		ret = dev->msg_handler(dev, asid, &msg);
	else
		ret = vhost_process_iotlb_msg(dev, asid, &msg);
	if (ret) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);

done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
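
/*
 * Hedged sketch of the userspace side of vhost_chr_write_iter(): a
 * device-IOTLB update is pushed by writing a struct vhost_msg_v2 to the
 * vhost fd (unset fields are zero; error handling elided):
 *
 *	struct vhost_msg_v2 msg = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.iova  = iova,
 *			.size  = size,
 *			.uaddr = (__u64)(uintptr_t)buf,
 *			.perm  = VHOST_ACCESS_RW,
 *			.type  = VHOST_IOTLB_UPDATE,
 *		},
 *	};
 *	write(vhost_fd, &msg, sizeof(msg));
 */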
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}
static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 vring_desc_t __user *desc,
			 vring_avail_t __user *avail,
			 vring_used_t __user *used)
{
	/* If an IOTLB device is present, the vring addresses are
	 * GIOVAs. Access validation occurs at prefetch time. */
	if (vq->iotlb)
		return true;

	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
	       access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_iotlb_map *map,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(map->perm & access))
		vq->meta_iotlb[type] = map;
}
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_iotlb_map *map;
	struct vhost_iotlb *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(map->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = map->size - addr + map->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, map, type);

		s += size;
		addr += size;
	}

	return true;
}
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
				  void __user *log_base,
				  bool log_used,
				  u64 log_addr)
{
	/* If an IOTLB device is present, log_addr is a GIOVA that
	 * will never be logged by log_used(). */
	if (vq->iotlb)
		return true;

	return !log_used || log_access_ok(log_base, log_addr,
					  vhost_get_used_size(vq, vq->num));
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_iotlb *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
			  GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   flex_array_size(newmem, regions, mem.nregions))) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = iotlb_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_iotlb_add_range(newumem,
					  region->guest_phys_addr,
					  region->guest_phys_addr +
					  region->memory_size - 1,
					  region->userspace_addr,
					  VHOST_MAP_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_iotlb_free(oldumem);
	return 0;

err:
	vhost_iotlb_free(newumem);
	kvfree(newmem);
	return -EFAULT;
}
*d
,
1862 struct vhost_virtqueue
*vq
,
1865 struct vhost_vring_state s
;
1867 /* Resizing ring with an active backend?
1868 * You don't want to do that. */
1869 if (vq
->private_data
)
1872 if (copy_from_user(&s
, argp
, sizeof s
))
1875 if (!s
.num
|| s
.num
> 0xffff || (s
.num
& (s
.num
- 1)))
1882 static long vhost_vring_set_addr(struct vhost_dev
*d
,
1883 struct vhost_virtqueue
*vq
,
1886 struct vhost_vring_addr a
;
1888 if (copy_from_user(&a
, argp
, sizeof a
))
1890 if (a
.flags
& ~(0x1 << VHOST_VRING_F_LOG
))
1893 /* For 32bit, verify that the top 32bits of the user
1894 data are set to zero. */
1895 if ((u64
)(unsigned long)a
.desc_user_addr
!= a
.desc_user_addr
||
1896 (u64
)(unsigned long)a
.used_user_addr
!= a
.used_user_addr
||
1897 (u64
)(unsigned long)a
.avail_user_addr
!= a
.avail_user_addr
)
1900 /* Make sure it's safe to cast pointers to vring types. */
1901 BUILD_BUG_ON(__alignof__
*vq
->avail
> VRING_AVAIL_ALIGN_SIZE
);
1902 BUILD_BUG_ON(__alignof__
*vq
->used
> VRING_USED_ALIGN_SIZE
);
1903 if ((a
.avail_user_addr
& (VRING_AVAIL_ALIGN_SIZE
- 1)) ||
1904 (a
.used_user_addr
& (VRING_USED_ALIGN_SIZE
- 1)) ||
1905 (a
.log_guest_addr
& (VRING_USED_ALIGN_SIZE
- 1)))
1908 /* We only verify access here if backend is configured.
1909 * If it is not, we don't as size might not have been setup.
1910 * We will verify when backend is configured. */
1911 if (vq
->private_data
) {
1912 if (!vq_access_ok(vq
, vq
->num
,
1913 (void __user
*)(unsigned long)a
.desc_user_addr
,
1914 (void __user
*)(unsigned long)a
.avail_user_addr
,
1915 (void __user
*)(unsigned long)a
.used_user_addr
))
1918 /* Also validate log access for used ring if enabled. */
1919 if (!vq_log_used_access_ok(vq
, vq
->log_base
,
1920 a
.flags
& (0x1 << VHOST_VRING_F_LOG
),
1925 vq
->log_used
= !!(a
.flags
& (0x1 << VHOST_VRING_F_LOG
));
1926 vq
->desc
= (void __user
*)(unsigned long)a
.desc_user_addr
;
1927 vq
->avail
= (void __user
*)(unsigned long)a
.avail_user_addr
;
1928 vq
->log_addr
= a
.log_guest_addr
;
1929 vq
->used
= (void __user
*)(unsigned long)a
.used_user_addr
;
static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{
	long r;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		r = vhost_vring_set_num(d, vq, argp);
		break;
	case VHOST_SET_VRING_ADDR:
		r = vhost_vring_set_addr(d, vq, argp);
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	mutex_unlock(&vq->mutex);

	return r;
}

long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	u32 idx;
	long r;

	r = vhost_get_vq_from_user(d, argp, &vq, &idx);
	if (r < 0)
		return r;

	if (ioctl == VHOST_SET_VRING_NUM ||
	    ioctl == VHOST_SET_VRING_ADDR) {
		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
	}

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq->last_avail_idx = s.num & 0xffff;
			vq->last_used_idx = (s.num >> 16) & 0xffff;
		} else {
			if (s.num > 0xffff) {
				r = -EINVAL;
				break;
			}
			vq->last_avail_idx = s.num;
		}
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
			s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
		else
			s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}

		swap(ctx, vq->call_ctx.ctx);
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->error_ctx);
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_dev_flush(vq->poll.dev);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
int vhost_init_device_iotlb(struct vhost_dev *d)
{
	struct vhost_iotlb *niotlb, *oiotlb;
	int i;

	niotlb = iotlb_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->iotlb = niotlb;
		__vhost_vq_meta_reset(vq);
		mutex_unlock(&vq->mutex);
	}

	vhost_iotlb_free(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct eventfd_ctx *ctx;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, d->log_ctx);
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	unpin_user_pages_dirty_lock(&page, 1, true);
	return 0;
}

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
	struct vhost_iotlb *umem = vq->umem;
	struct vhost_iotlb_map *u;
	u64 start, end, l, min;
	int r;
	bool hit = false;

	while (len) {
		min = len;
		/* More than one GPA can be mapped into a single HVA. So
		 * iterate all possible umems here to be safe.
		 */
		list_for_each_entry(u, &umem->list, link) {
			if (u->addr > hva - 1 + len ||
			    u->addr - 1 + u->size < hva)
				continue;
			start = max(u->addr, hva);
			end = min(u->addr - 1 + u->size, hva - 1 + len);
			l = end - start + 1;
			r = log_write(vq->log_base,
				      u->start + start - u->addr,
				      l);
			if (r < 0)
				return r;
			hit = true;
			min = min(l, min);
		}

		if (!hit)
			return -EFAULT;

		len -= min;
		hva += min;
	}

	return 0;
}
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
	struct iovec *iov = vq->log_iov;
	int i, ret;

	if (!vq->iotlb)
		return log_write(vq->log_base, vq->log_addr + used_offset, len);

	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
			     len, iov, 64, VHOST_ACCESS_WO);
	if (ret < 0)
		return ret;

	for (i = 0; i < ret; i++) {
		ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
				    iov[i].iov_len);
		if (ret)
			return ret;
	}

	return 0;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len, struct iovec *iov, int count)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();

	if (vq->iotlb) {
		for (i = 0; i < count; i++) {
			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
					  iov[i].iov_len);
			if (r < 0)
				return r;
		}
		return 0;
	}

	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_used_flags(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{
	if (vhost_put_avail_event(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx);
	}
	return 0;
}

int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used_idx(vq, &last_used_idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_iotlb_map *map;
	struct vhost_dev *dev = vq->dev;
	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0, last = addr + len - 1;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(map->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = map->size - addr + map->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
				 (map->addr + addr - map->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
	return next;
}
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len);
	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
				       ret, i);
			return ret;
		}

		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	return 0;
}
/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx = vq->last_avail_idx;
	__virtio16 ring_head;
	int ret, access;

	if (vq->avail_idx == vq->last_avail_idx) {
		ret = vhost_get_avail_idx(vq);
		if (unlikely(ret < 0))
			return ret;

		if (!ret)
			return vq->num;
	}

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_get_desc(vq, &desc, i);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
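
/*
 * Editor's sketch (not part of the original file): minimal backend use of
 * vhost_get_vq_desc()/vhost_discard_vq_desc().  backend_xmit_one() is a
 * hypothetical per-buffer handler; the caller is assumed to hold vq->mutex
 * and to have already called vhost_disable_notify(), since
 * vhost_get_vq_desc() expects guest notifications to be suppressed.
 */
#if 0	/* illustrative only, not compiled */
static int backend_consume_one(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq)
{
	struct iovec iov[64];
	unsigned int out, in;
	int head;

	head = vhost_get_vq_desc(vq, iov, ARRAY_SIZE(iov),
				 &out, &in, NULL, NULL);
	if (head < 0)
		return head;		/* fault or malformed ring */
	if (head == vq->num)
		return -EAGAIN;		/* nothing available right now */

	if (backend_xmit_one(iov, out) < 0) {	/* hypothetical helper */
		/* Roll back so the descriptor is fetched again later. */
		vhost_discard_vq_desc(vq, 1);
		return -EIO;
	}

	/* Publish the used entry and kick the guest's call eventfd. */
	vhost_add_used_and_signal(dev, vq, head, 0);
	return 0;
}
#endif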
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	vring_used_elem_t __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (vhost_put_used(vq, heads, start, count)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_used(vq, ((void __user *)used - (void __user *)vq->used),
			 count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_used_idx(vq)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure used idx is seen before log. */
		smp_wmb();
		/* Log used index update. */
		log_used(vq, offsetof(struct vring_used, idx),
			 sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
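
/*
 * Worked example (editor's note, not in the original): with vq->num = 256,
 * last_used_idx = 510 and count = 10, start = 510 & 255 = 254, so
 * n = 256 - 254 = 2 entries are written at the tail of the used ring and
 * the remaining 8 wrap around to slot 0 in the second __vhost_add_used_n()
 * call.  Only after both copies does vhost_put_used_idx() publish the new
 * index, so the guest never observes a partially written batch.
 */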
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_avail_flags(vq, &flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (vhost_get_used_event(vq, &event)) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
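
/*
 * Editor's note on the event-idx test above: vring_need_event(event, new, old)
 * (include/uapi/linux/virtio_ring.h) evaluates
 *	(u16)(new - event - 1) < (u16)(new - old)
 * which is true exactly when 'event' lies in the window [old, new) modulo
 * 2^16, i.e. when this batch of used entries crossed the index at which the
 * guest asked to be signalled.  For example, with old = 10 and new = 13:
 * event = 11 gives 1 < 3, so we signal; event = 14 gives 65534 < 3, which
 * is false, so we stay quiet until the guest's event index is reached.
 */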
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest to tell them we used something up. */
	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx.ctx);
}
EXPORT_SYMBOL_GPL(vhost_signal);
/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
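
/*
 * Editor's sketch (not part of the original file): batching completions with
 * vhost_add_used_and_signal_n() so the guest's call eventfd fires at most
 * once per batch instead of once per buffer.  VHOST_EXAMPLE_BATCH and the
 * way completed heads are collected are assumptions of the example.
 */
#if 0	/* illustrative only, not compiled */
#define VHOST_EXAMPLE_BATCH 64

static void backend_complete_batch(struct vhost_dev *dev,
				   struct vhost_virtqueue *vq,
				   const unsigned int *done_heads,
				   const int *done_lens, unsigned int nr)
{
	struct vring_used_elem heads[VHOST_EXAMPLE_BATCH];
	unsigned int i;

	if (WARN_ON(nr > VHOST_EXAMPLE_BATCH))
		nr = VHOST_EXAMPLE_BATCH;

	for (i = 0; i < nr; i++) {
		heads[i].id = cpu_to_vhost32(vq, done_heads[i]);
		heads[i].len = cpu_to_vhost32(vq, done_lens[i]);
	}
	/* One used-ring update, one used-idx store, at most one interrupt. */
	vhost_add_used_and_signal_n(dev, vq, heads, nr);
}
#endif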
/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->avail_idx != vq->last_avail_idx)
		return false;

	r = vhost_get_avail_idx(vq);

	/* Note: we treat error as non-empty here */
	return r == 0;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();

	r = vhost_get_avail_idx(vq);
	/* Note: we treat error as empty here */
	if (unlikely(r < 0))
		return false;

	return r;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
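
/*
 * Editor's sketch (not part of the original file): the canonical
 * disable/drain/re-enable pattern backends use so a kick arriving while the
 * ring is being drained is never lost.  handle_one() is a hypothetical
 * per-buffer handler; vq->mutex is assumed held.
 */
#if 0	/* illustrative only, not compiled */
static void backend_drain(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	struct iovec iov[64];
	unsigned int out, in;
	int head;

	vhost_disable_notify(dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, iov, ARRAY_SIZE(iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* ring access fault */
		if (head == vq->num) {
			/* Ring looked empty: re-enable notifications.  If a
			 * buffer slipped in meanwhile, vhost_enable_notify()
			 * returns true and we must loop again. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		handle_one(vq, iov, out, in);	/* hypothetical */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
}
#endif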
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	/* Make sure all padding within the structure is initialized. */
	struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return NULL;

	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);
void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
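
/*
 * Editor's sketch (not part of the original file): how the message helpers
 * above are typically used for IOTLB miss reporting.  A translation miss is
 * queued on dev->read_list and later drained by userspace through the vhost
 * chardev read handler via vhost_dequeue_msg().  Field names follow struct
 * vhost_iotlb_msg; whether a V2 message type is needed depends on the acked
 * backend features, so the plain VHOST_IOTLB_MSG below is an assumption.
 */
#if 0	/* illustrative only, not compiled */
static int example_report_iotlb_miss(struct vhost_virtqueue *vq,
				     u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;

	node = vhost_new_msg(vq, VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	node->msg.iotlb.iova = iova;
	node->msg.iotlb.perm = access;
	node->msg.iotlb.type = VHOST_IOTLB_MISS;

	/* Wakes any reader blocked in the chardev's read/poll. */
	vhost_enqueue_msg(dev, &dev->read_list, node);
	return 0;
}
#endif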
void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_backend_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(vhost_set_backend_features);
static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");