// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");
enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
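
/*
 * Layout note: with VIRTIO_RING_F_EVENT_IDX negotiated, the driver's
 * used_event field sits in the slot right after the avail ring and the
 * device's avail_event right after the used ring, which is exactly what the
 * two macros above compute (see the split virtqueue layout in the virtio
 * specification).
 */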
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq,
				   int __user *argp)
{
	struct vhost_vring_state s;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}
struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}
static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
	struct vhost_work *work = &poll->work;

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	if (!poll->dev->use_worker)
		work->fn(work);
	else
		vhost_poll_queue(poll);

	return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
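
/*
 * Illustrative sketch (not part of this file): a backend typically embeds a
 * struct vhost_work (often inside a struct vhost_poll) and defers processing
 * to the per-device worker thread via the helpers above.  The names
 * my_backend and my_work_fn below are hypothetical.
 *
 *	static void my_work_fn(struct vhost_work *work)
 *	{
 *		struct my_backend *b = container_of(work, struct my_backend,
 *						    work);
 *		// runs in the vhost worker kthread, see vhost_worker() below
 *	}
 *
 *	vhost_work_init(&b->work, my_work_fn);
 *	vhost_work_queue(&b->dev, &b->work);
 */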
static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}
static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}
static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
	call_ctx->ctx = NULL;
	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}
bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{
	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
}
EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->last_avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->acked_backend_features = 0;
	vq->error_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vhost_vring_call_reset(&vq->call_ctx);
	__vhost_vq_meta_reset(vq);
}
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;

	kthread_use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			kcov_remote_start_common(dev->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			if (need_resched())
				schedule();
		}
	}
	kthread_unuse_mm(dev->mm);
	return 0;
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}
bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
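
/*
 * Illustrative sketch (not part of this file): request-handling loops bound
 * the work done per handler invocation with vhost_exceeds_weight(); once the
 * packet or byte budget is spent, the poll work is requeued and the handler
 * returns, so one virtqueue cannot monopolise the worker thread.  A hedged
 * shape of such a loop (process_one_request is hypothetical):
 *
 *	do {
 *		len = process_one_request(vq);
 *		total_len += len;
 *	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
 */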
static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->avail) +
	       sizeof(*vq->avail->ring) * num + event;
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return sizeof(*vq->used) +
	       sizeof(*vq->used->ring) * num + event;
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	return sizeof(*vq->desc) * num;
}
void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg))
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	dev->use_worker = use_worker;
	dev->msg_handler = msg_handler;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
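
/*
 * Illustrative sketch (not part of this file): a backend allocates its
 * virtqueues, fills an array of vhost_virtqueue pointers, sets each
 * vq->handle_kick, and then calls vhost_dev_init().  The names and the
 * specific weight limits below are hypothetical, not taken from any driver:
 *
 *	n->vqs[0].handle_kick = my_handle_kick;
 *	vqs[0] = &n->vqs[0];
 *	vhost_dev_init(&n->dev, vqs, 1, UIO_MAXIOV, 64, 64 * 1024,
 *		       true, NULL);
 */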
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
static void vhost_attach_mm(struct vhost_dev *dev)
{
	/* No owner, become one */
	if (dev->use_worker) {
		dev->mm = get_task_mm(current);
	} else {
		/* vDPA device does not use a worker thread, so there's
		 * no need to hold the address space for mm. This helps
		 * to avoid deadlock in the case of mmap(), which may
		 * hold the refcount of the file and depend on the
		 * release method to remove the vma.
		 */
		dev->mm = current->mm;
		mmgrab(dev->mm);
	}
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
	if (!dev->mm)
		return;

	if (dev->use_worker)
		mmput(dev->mm);
	else
		mmdrop(dev->mm);

	dev->mm = NULL;
}
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	vhost_attach_mm(dev);

	dev->kcov_handle = kcov_common_handle();
	if (dev->use_worker) {
		worker = kthread_create(vhost_worker, dev,
					"vhost-%d", current->pid);
		if (IS_ERR(worker)) {
			err = PTR_ERR(worker);
			goto err_worker;
		}

		dev->worker = worker;
		wake_up_process(worker); /* avoid contributing to loadavg */

		err = vhost_attach_cgroups(dev);
		if (err)
			goto err_cgroup;
	}

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
err_worker:
	vhost_detach_mm(dev);
	dev->kcov_handle = 0;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
static struct vhost_iotlb *iotlb_alloc(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
	return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx.ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_iotlb_free(dev->umem);
	dev->umem = NULL;
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
		dev->kcov_handle = 0;
	}
	vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}
/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
				int log_all)
{
	struct vhost_iotlb_map *map;

	if (!umem)
		return false;

	list_for_each_entry(map, &umem->list, link) {
		unsigned long a = map->addr;

		if (vhost_overflow(map->addr, map->size))
			return false;

		if (!access_ok((void __user *)a, map->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   map->start,
						   map->size))
			return false;
	}
	return true;
}
static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

	if (!map)
		return NULL;

	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}
static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}
/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * can be accessed through the iotlb. So -EAGAIN
 * should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}
#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT;	\
	} \
	ret; \
})
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}
#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
				      __virtio16 *idx)
{
	return vhost_get_avail(vq, *idx, &vq->avail->idx);
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
	return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}
static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok((void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok((void __user *)a, size))
		return false;
	return true;
}
static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
					  msg->iova + msg->size - 1,
					  msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_iotlb_del_range(dev->iotlb, msg->iova,
				      msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There may be a hole after type for V1 message type,
		 * so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}

	if (dev->msg_handler)
		ret = dev->msg_handler(dev, &msg);
	else
		ret = vhost_process_iotlb_msg(dev, &msg);
	if (ret) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);

done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
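
/*
 * Illustrative sketch (not part of this file): with
 * VHOST_BACKEND_F_IOTLB_MSG_V2 negotiated, userspace answers an IOTLB miss by
 * write()ing a struct vhost_msg_v2 carrying a VHOST_IOTLB_UPDATE entry to the
 * device fd; the write lands in vhost_chr_write_iter() above and is routed to
 * vhost_process_iotlb_msg() (or the device's msg_handler).  Hedged
 * userspace-side shape:
 *
 *	struct vhost_msg_v2 msg = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.iova  = iova,
 *			.size  = size,
 *			.uaddr = (uint64_t)buf,
 *			.perm  = VHOST_ACCESS_RW,
 *			.type  = VHOST_IOTLB_UPDATE,
 *		},
 *	};
 *	write(vhost_fd, &msg, sizeof(msg));
 */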
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}
static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 vring_desc_t __user *desc,
			 vring_avail_t __user *avail,
			 vring_used_t __user *used)
{
	/* If an IOTLB device is present, the vring addresses are
	 * GIOVAs. Access validation occurs at prefetch time. */
	if (vq->iotlb)
		return true;

	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
	       access_ok(used, vhost_get_used_size(vq, num));
}
static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_iotlb_map *map,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(map->perm & access))
		vq->meta_iotlb[type] = map;
}
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_iotlb_map *map;
	struct vhost_iotlb *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(map->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = map->size - addr + map->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, map, type);

		s += size;
		addr += size;
	}

	return true;
}
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
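
/*
 * Usage note (not part of this file): when an IOTLB is in use, backends call
 * vq_meta_prefetch() before touching the ring on each handler invocation.  If
 * it fails, the handler simply returns; the VHOST_IOTLB_MISS queued above is
 * read and answered by userspace, and vhost_iotlb_notify_vq() then requeues
 * the virtqueue's poll work so processing resumes.
 */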
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
				  void __user *log_base,
				  bool log_used,
				  u64 log_addr)
{
	/* If an IOTLB device is present, log_addr is a GIOVA that
	 * will never be logged by log_used(). */
	if (vq->iotlb)
		return true;

	return !log_used || log_access_ok(log_base, log_addr,
					  vhost_get_used_size(vq, vq->num));
}
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_iotlb *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
			  GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   flex_array_size(newmem, regions, mem.nregions))) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = iotlb_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_iotlb_add_range(newumem,
					  region->guest_phys_addr,
					  region->guest_phys_addr +
					  region->memory_size - 1,
					  region->userspace_addr,
					  VHOST_MAP_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_iotlb_free(oldumem);
	return 0;

err:
	vhost_iotlb_free(newumem);
	kvfree(newmem);
	return -EFAULT;
}
static long vhost_vring_set_num(struct vhost_dev *d,
				struct vhost_virtqueue *vq,
				void __user *argp)
{
	struct vhost_vring_state s;

	/* Resizing ring with an active backend?
	 * You don't want to do that. */
	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof s))
		return -EFAULT;

	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
		return -EINVAL;
	vq->num = s.num;

	return 0;
}
static long vhost_vring_set_addr(struct vhost_dev *d,
				 struct vhost_virtqueue *vq,
				 void __user *argp)
{
	struct vhost_vring_addr a;

	if (copy_from_user(&a, argp, sizeof a))
		return -EFAULT;
	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
		return -EOPNOTSUPP;

	/* For 32bit, verify that the top 32bits of the user
	   data are set to zero. */
	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
		return -EFAULT;

	/* Make sure it's safe to cast pointers to vring types. */
	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
		return -EINVAL;

	/* We only verify access here if backend is configured.
	 * If it is not, we don't as size might not have been setup.
	 * We will verify when backend is configured. */
	if (vq->private_data) {
		if (!vq_access_ok(vq, vq->num,
			(void __user *)(unsigned long)a.desc_user_addr,
			(void __user *)(unsigned long)a.avail_user_addr,
			(void __user *)(unsigned long)a.used_user_addr))
			return -EINVAL;

		/* Also validate log access for used ring if enabled. */
		if (!vq_log_used_access_ok(vq, vq->log_base,
					   a.flags & (0x1 << VHOST_VRING_F_LOG),
					   a.log_guest_addr))
			return -EINVAL;
	}

	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
	vq->log_addr = a.log_guest_addr;
	vq->used = (void __user *)(unsigned long)a.used_user_addr;

	return 0;
}
static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{
	long r;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		r = vhost_vring_set_num(d, vq, argp);
		break;
	case VHOST_SET_VRING_ADDR:
		r = vhost_vring_set_addr(d, vq, argp);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}

	mutex_unlock(&vq->mutex);

	return r;
}
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, d->nvqs);
	vq = d->vqs[idx];

	if (ioctl == VHOST_SET_VRING_NUM ||
	    ioctl == VHOST_SET_VRING_ADDR) {
		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
	}

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}

		swap(ctx, vq->call_ctx.ctx);
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->error_ctx);
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
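
/*
 * Illustrative sketch (not part of this file): the usual userspace setup
 * sequence against this ioctl surface, per virtqueue.  Error handling is
 * omitted and the concrete values are hypothetical:
 *
 *	struct vhost_vring_state num  = { .index = 0, .num = 256 };
 *	struct vhost_vring_addr  addr = { .index = 0, ... ring addresses ... };
 *	struct vhost_vring_file  kick = { .index = 0, .fd = kick_efd };
 *	struct vhost_vring_file  call = { .index = 0, .fd = call_efd };
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_NUM,  &num);
 *	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);
 */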
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_iotlb *niotlb, *oiotlb;
	int i;

	niotlb = iotlb_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->iotlb = niotlb;
		__vhost_vq_meta_reset(vq);
		mutex_unlock(&vq->mutex);
	}

	vhost_iotlb_free(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct eventfd_ctx *ctx;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;

			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, d->log_ctx);
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.rst.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	unpin_user_pages_dirty_lock(&page, 1, true);
	return 0;
}
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
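
/*
 * Layout note: the dirty log is a bitmap with one bit per VHOST_PAGE_SIZE
 * page of guest memory.  For a write at guest address A, the bit index is
 * A / VHOST_PAGE_SIZE, i.e. byte (A / VHOST_PAGE_SIZE) / 8 of the log and bit
 * (A / VHOST_PAGE_SIZE) % 8 within that byte -- exactly the write_page / 8
 * and write_page % 8 computed above.
 */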
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
	struct vhost_iotlb *umem = vq->umem;
	struct vhost_iotlb_map *u;
	u64 start, end, l, min;
	int r;
	bool hit = false;

	while (len) {
		min = len;
		/* More than one GPAs can be mapped into a single HVA. So
		 * iterate all possible umems here to be safe.
		 */
		list_for_each_entry(u, &umem->list, link) {
			if (u->addr > hva - 1 + len ||
			    u->addr - 1 + u->size < hva)
				continue;
			start = max(u->addr, hva);
			end = min(u->addr - 1 + u->size, hva - 1 + len);
			l = end - start + 1;
			r = log_write(vq->log_base,
				      u->start + start - u->addr,
				      l);
			if (r < 0)
				return r;
			hit = true;
			min = min(l, min);
		}

		if (!hit)
			return -EFAULT;

		len -= min;
		hva += min;
	}

	return 0;
}
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
	struct iovec *iov = vq->log_iov;
	int i, ret;

	if (!vq->iotlb)
		return log_write(vq->log_base, vq->log_addr + used_offset, len);

	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
			     len, iov, 64, VHOST_ACCESS_WO);
	if (ret < 0)
		return ret;

	for (i = 0; i < ret; i++) {
		ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
				    iov[i].iov_len);
		if (ret)
			return ret;
	}

	return 0;
}
*vq
, struct vhost_log
*log
,
1925 unsigned int log_num
, u64 len
, struct iovec
*iov
, int count
)
1929 /* Make sure data written is seen before log. */
1933 for (i
= 0; i
< count
; i
++) {
1934 r
= log_write_hva(vq
, (uintptr_t)iov
[i
].iov_base
,
1942 for (i
= 0; i
< log_num
; ++i
) {
1943 u64 l
= min(log
[i
].len
, len
);
1944 r
= log_write(vq
->log_base
, log
[i
].addr
, l
);
1950 eventfd_signal(vq
->log_ctx
, 1);
1954 /* Length written exceeds what we have stored. This is a bug. */
1958 EXPORT_SYMBOL_GPL(vhost_log_write
);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;

	if (vhost_put_used_flags(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (vhost_put_avail_event(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used_idx(vq, &last_used_idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_iotlb_map *map;
	struct vhost_dev *dev = vq->dev;
	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
		if (map == NULL || map->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(map->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = map->size - addr + map->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
				 (map->addr + addr - map->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}
/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
	return next;
}
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);
	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
				       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}
/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;

	if (vq->avail_idx == vq->last_avail_idx) {
		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
			vq_err(vq, "Failed to access avail idx at %p\n",
			       &vq->avail->idx);
			return -EFAULT;
		}
		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
			vq_err(vq, "Guest moved used index from %u to %u",
			       last_avail_idx, vq->avail_idx);
			return -EFAULT;
		}

		/* If there's nothing new since last we looked, return
		 * invalid.
		 */
		if (vq->avail_idx == last_avail_idx)
			return vq->num;

		/* Only get avail ring entries after they have been
		 * exposed by guest.
		 */
		smp_rmb();
	}

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_get_desc(vq, &desc, i);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
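
/*
 * Illustrative sketch (not part of this file): the usual shape of a backend's
 * kick handler built on the helpers in this file.  Error handling is omitted
 * and handle_request() is a hypothetical helper:
 *
 *	vhost_disable_notify(&dev->dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(&dev->dev, vq))) {
 *				// a buffer slipped in meanwhile, keep going
 *				vhost_disable_notify(&dev->dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		len = handle_request(vq, out, in);
 *		vhost_add_used_and_signal(&dev->dev, vq, head, len);
 *	}
 */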
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	vring_used_elem_t __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (vhost_put_used(vq, heads, start, count)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_used(vq, ((void __user *)used - (void __user *)vq->used),
			 count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_used_idx(vq)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure used idx is seen before log. */
		smp_wmb();
		/* Log used index update. */
		log_used(vq, offsetof(struct vring_used, idx),
			 sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_avail_flags(vq, &flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (vhost_get_used_event(vq, &event)) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest to tell them we used something up. */
	if (vq->call_ctx.ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx.ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* return true if we're sure that the available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (vq->avail_idx != vq->last_avail_idx)
		return false;

	r = vhost_get_avail_idx(vq, &avail_idx);
	if (unlikely(r))
		return false;
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = vhost_get_avail_idx(vq, &avail_idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
/* Create a new message. */
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
{
	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
	if (!node)
		return NULL;

	/* Make sure all padding within the structure is initialized. */
	memset(&node->msg, 0, sizeof node->msg);
	node->vq = vq;
	node->msg.type = type;
	return node;
}
EXPORT_SYMBOL_GPL(vhost_new_msg);
void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head)
{
	struct vhost_msg_node *node = NULL;

	spin_lock(&dev->iotlb_lock);
	if (!list_empty(head)) {
		node = list_first_entry(head, struct vhost_msg_node,
					node);
		list_del(&node->node);
	}
	spin_unlock(&dev->iotlb_lock);

	return node;
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	mutex_lock(&dev->mutex);
	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_backend_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(vhost_set_backend_features);
static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");