// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;

		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}
/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}
/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}
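/*
 * Worked sketch of the bookkeeping above (values are illustrative, not from
 * the original file): with iov->iov[0] = { .iov_base = b, .iov_len = 8 } and
 * len = 3, one call transfers 3 bytes and leaves iov_base = b + 3,
 * iov_len = 5, iov->consumed = 3.  A later call with len >= 5 drains the
 * element; the "fix up" branch then restores iov_base = b and iov_len = 8
 * (its original extent), resets consumed and advances iov->i, so the element
 * still describes the whole buffer after it has been fully consumed.
 */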
/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;

	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}
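/*
 * Worked example (illustrative numbers, not from the original file): a
 * descriptor of *len = 0x1000 at addr = 0x3ff0 checked against a range with
 * end_incl = 0x3fff has addr + *len - 1 = 0x4fef > end_incl, so the truncate
 * path shrinks *len to 0x3fff + 1 - 0x3ff0 = 0x10.  The caller (__vringh_iov)
 * notices that *len no longer matches the descriptor length and re-checks the
 * remainder against the next range.
 */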
static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}
/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", desc->len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}
static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}
static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}
static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	if (riov)
		riov->i = riov->used = 0;
	if (wiov)
		wiov->i = wiov->used = 0;
	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->i)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}
static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}
static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}
static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}
static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}
/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}
/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * carefully yourself.
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);
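/*
 * Usage sketch (not part of the original file; the ring size and the mapped
 * userspace addresses are hypothetical): a vhost-style host driver typically
 * initializes the vringh once it knows where the guest placed the ring:
 *
 *	struct vringh vrh;
 *	int err;
 *
 *	err = vringh_init_user(&vrh, features, 256, true,
 *			       (struct vring_desc __user *)desc_uaddr,
 *			       (struct vring_avail __user *)avail_uaddr,
 *			       (struct vring_used __user *)used_uaddr);
 *	if (err)
 *		return err;	// 256 must be a power of 2 and <= 0xffff
 */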
/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be the identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
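/*
 * Typical service-loop sketch (illustrative only; the iov arrays, request and
 * reply buffers, getrange callback and notify_guest() are hypothetical):
 *
 *	struct iovec riovec[8], wiovec[8];
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *
 *	vringh_iov_init(&riov, riovec, ARRAY_SIZE(riovec));
 *	vringh_iov_init(&wiov, wiovec, ARRAY_SIZE(wiovec));
 *
 *	if (vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head) == 1) {
 *		ssize_t in = vringh_iov_pull_user(&riov, req, sizeof(req));
 *		ssize_t out = vringh_iov_push_user(&wiov, resp, resp_len);
 *
 *		vringh_complete_user(&vrh, head, out > 0 ? out : 0);
 *		if (vringh_need_notify_user(&vrh) > 0)
 *			notify_guest();		// e.g. signal an eventfd
 *	}
 */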
/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_user() to undo).
 *
 * The next vringh_get_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);
/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
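/*
 * Sketch of the usual notification dance (illustrative only; the worker loop
 * and wait primitive are hypothetical): notifications stay disabled while the
 * service loop is busy and are re-enabled just before sleeping, with a
 * re-check to close the race against newly posted buffers:
 *
 *	vringh_notify_disable_user(&vrh);
 *	while (process_available_buffers(&vrh))
 *		;
 *	if (!vringh_notify_enable_user(&vrh))
 *		goto more_work;	// a buffer slipped in; keep going
 *	wait_for_guest_kick();
 */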
/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}
/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);
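/*
 * Usage sketch (illustrative only; the allocation, alignment and ring size
 * are assumptions, not taken from this file): a purely in-kernel user can lay
 * the ring out with the standard vring helpers and hand the three pointers to
 * vringh_init_kern():
 *
 *	struct vring vring;
 *	void *ring = kzalloc(vring_size(256, SMP_CACHE_BYTES), GFP_KERNEL);
 *
 *	vring_init(&vring, 256, ring, SMP_CACHE_BYTES);
 *	err = vringh_init_kern(&vrh, features, 256, true,
 *			       vring.desc, vring.avail, vring.used);
 */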
/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);
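/*
 * Kernel-side consumption sketch (illustrative; the kvec backing storage,
 * request/response buffers and ring state are hypothetical, and assume the
 * vringh_kiov_init() helper from <linux/vringh.h>):
 *
 *	struct kvec rkvec[8], wkvec[8];
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, rkvec, ARRAY_SIZE(rkvec));
 *	vringh_kiov_init(&wiov, wkvec, ARRAY_SIZE(wkvec));
 *	if (vringh_getdesc_kern(&vrh, &riov, &wiov, &head, GFP_KERNEL) == 1) {
 *		vringh_iov_pull_kern(&riov, req, sizeof(req));
 *		vringh_iov_push_kern(&wiov, resp, resp_len);
 *		vringh_complete_kern(&vrh, head, resp_len);
 *	}
 */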
/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_kern() to undo).
 *
 * The next vringh_get_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}
static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}
static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}
static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
	if (ret != size)
		return -EFAULT;

	return 0;
}
/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the descriptor pointer (an address translated through the IOTLB).
 * @avail: the avail pointer (an address translated through the IOTLB).
 * @used: the used pointer (an address translated through the IOTLB).
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - install the IOTLB used to translate ring addresses.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);
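/*
 * Setup sketch for a vDPA-style device (illustrative only; the iotlb limit
 * and the device-supplied IOVAs are assumptions, not taken from this file):
 *
 *	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);
 *
 *	err = vringh_init_iotlb(&vrh, features, 256, false,
 *				(struct vring_desc *)desc_iova,
 *				(struct vring_avail *)avail_iova,
 *				(struct vring_used *)used_iova);
 *	vringh_set_iotlb(&vrh, iotlb);
 *	// every ring access is now translated through iotlb before copying
 */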
/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);
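/*
 * Consumption sketch (illustrative only; buffers and ring state are
 * hypothetical): unlike the _user/_kern pull/push helpers, the iotlb
 * variants also take the vringh so each copy can be translated:
 *
 *	if (vringh_getdesc_iotlb(&vrh, &riov, &wiov, &head, GFP_ATOMIC) == 1) {
 *		vringh_iov_pull_iotlb(&vrh, &riov, req, sizeof(req));
 *		vringh_iov_push_iotlb(&vrh, &wiov, resp, resp_len);
 *		vringh_complete_iotlb(&vrh, head, resp_len);
 *	}
 */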
/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);
/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_iotlb() to undo).
 *
 * The next vringh_get_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);
#endif /* CONFIG_VHOST_IOTLB */

MODULE_LICENSE("GPL");