// SPDX-License-Identifier: GPL-2.0
/* Simple test of virtio code, entirely in userspace. */
#include <linux/kernel.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#define USER_MEM (1024*1024)
void *__user_addr_min, *__user_addr_max;
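/* Allocation hooks: the userspace kmalloc/kfree shims in tools/virtio are
 * presumably expected to hand out __kmalloc_fake when it is set, and to skip
 * freeing pointers inside [__kfree_ignore_start, __kfree_ignore_end), so the
 * test can steer "kernel" allocations into its own mapped memory. */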
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
static u64 user_addr_offset;
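
/* Stub callbacks for paths the tests below should never exercise. */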
static bool never_notify_host(struct virtqueue *vq)
{
	abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
	abort();
}
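
/* Translate a guest address into the host-side mapping: the whole user
 * buffer is reported as one range, shifted by user_addr_offset. */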
static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
	r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
	r->offset = user_addr_offset;
	return true;
}
/* We return single byte ranges. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = addr;
	r->end_incl = r->start;
	r->offset = user_addr_offset;
	return true;
}
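
/* Guest-side state for the parallel test: the virtio device plus the pipe
 * fd used to ping the host and a count of notifications sent. */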
struct guest_virtio_device {
	struct virtio_device vdev;
	int to_host_fd;
	unsigned long notifies;
};
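
/* Notify callback for the parallel test: poke the host by writing a byte
 * down the pipe. */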
static bool parallel_notify_host(struct virtqueue *vq)
{
	int rc;
	struct guest_virtio_device *gvdev;

	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
	rc = write(gvdev->to_host_fd, "", 1);
	if (rc < 0)
		return false;
	gvdev->notifies++;
	return true;
}
static bool no_notify_host(struct virtqueue *vq)
{
	return true;
}
#define NUM_XFERS (10000000)
/* We aim for two "distant" cpus. */
static void find_cpus(unsigned int *first, unsigned int *last)
	for (i = 0; i < 4096; i++) {
		if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
/* Opencoded version for fast mode */
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
	err = get_user(avail_idx, &vrh->vring.avail->idx);

	if (vrh->last_avail_idx == avail_idx)
		return 0;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = vrh->last_avail_idx & (vrh->vring.num - 1);

	err = get_user(*head, &vrh->vring.avail->ring[i]);

	vrh->last_avail_idx++;
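
/*
 * Fork a host and a guest process that share the ring via a file-backed
 * mmap (mapped at different addresses in each), and stream NUM_XFERS
 * buffers between them.
 */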
static int parallel_test(u64 features,
			 bool (*getrange)(struct vringh *vrh,
					  u64 addr, struct vringh_range *r),
			 bool fast_vringh)
{
	void *host_map, *guest_map;
	int fd, mapsize, to_guest[2], to_host[2];
	unsigned long xfers = 0, notifies = 0, receives = 0;
	unsigned int first_cpu, last_cpu;

	/* Create real file to mmap. */
	fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
	if (fd < 0)
		err(1, "Opening /tmp/vringh_test-file");
	/* Extra room at the end for some data, and indirects */
	mapsize = vring_size(RINGSIZE, ALIGN)
		+ RINGSIZE * 2 * sizeof(int)
		+ RINGSIZE * 6 * sizeof(struct vring_desc);
	mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
	ftruncate(fd, mapsize);
	/* Parent and child use separate addresses, to check our mapping logic! */
	host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	find_cpus(&first_cpu, &last_cpu);
	printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
		int status, err, rlen = 0;

		/* We are the host: never access guest addresses! */
		munmap(guest_map, mapsize);

		__user_addr_min = host_map;
		__user_addr_max = __user_addr_min + mapsize;
		user_addr_offset = host_map - guest_map;
		assert(user_addr_offset);
		vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
		vringh_init_user(&vrh, features, RINGSIZE, true,
				 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", first_cpu);
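
		/* Host loop: pull each descriptor, copy the bytes back,
		 * complete it, and ping the guest when it wants a notify. */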
		while (xfers < NUM_XFERS) {
			struct iovec host_riov[2], host_wiov[2];
			struct vringh_iov riov, wiov;

			err = vringh_get_head(&vrh, &head);
			err = vringh_need_notify_user(&vrh);
				errx(1, "vringh_need_notify_user: %i",
				     err);
			write(to_guest[1], "", 1);
				errx(1, "vringh_get_head");
			vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
			vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

			err = vringh_getdesc_user(&vrh, &riov, &wiov,
						  getrange, &head);
			err = vringh_need_notify_user(&vrh);
				errx(1, "vringh_need_notify_user: %i",
				     err);
			write(to_guest[1], "", 1);

			if (!vringh_notify_enable_user(&vrh))

			/* Swallow all notifies at once. */
			if (read(to_host[0], buf, sizeof(buf)) < 1)

			vringh_notify_disable_user(&vrh);
				errx(1, "vringh_getdesc_user: %i", err);
			/* We simply copy bytes. */
			rlen = vringh_iov_pull_user(&riov, rbuf,
						    sizeof(rbuf));
				errx(1, "vringh_iov_pull_user: %i",
				     rlen);
			assert(riov.i == riov.used);

			err = vringh_iov_push_user(&wiov, rbuf, rlen);
				errx(1, "vringh_iov_push_user: %i",
				     err);
			assert(wiov.i == wiov.used);
			err = vringh_complete_user(&vrh, head, written);
				errx(1, "vringh_complete_user: %i", err);

			err = vringh_need_notify_user(&vrh);
				errx(1, "vringh_need_notify_user: %i", err);
				write(to_guest[1], "", 1);

		if (!WIFEXITED(status))
			errx(1, "Child died with signal %i?", WTERMSIG(status));
		if (WEXITSTATUS(status) != 0)
			errx(1, "Child exited %i?", WEXITSTATUS(status));
		printf("Host: notified %lu, pinged %lu\n", notifies, receives);
		struct guest_virtio_device gvdev;
		struct virtqueue *vq;
		struct vring_desc *indirects;
		unsigned int finished = 0;

		/* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
		data = guest_map + vring_size(RINGSIZE, ALIGN);
		indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);
		/* We are the guest. */
		munmap(host_map, mapsize);

		gvdev.vdev.features = features;
		INIT_LIST_HEAD(&gvdev.vdev.vqs);
		gvdev.to_host_fd = to_host[1];

		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			err(1, "Could not set affinity to cpu %u", first_cpu);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
					 false, guest_map,
					 fast_vringh ? no_notify_host
					 : parallel_notify_host,
					 never_callback_guest, "guest vq");

		/* Don't kfree indirects. */
		__kfree_ignore_start = indirects;
		__kfree_ignore_end = indirects + RINGSIZE * 6;
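
		/* Guest loop: alternate out/in buffers with varying sg
		 * layouts, reclaiming used buffers as they complete. */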
		while (xfers < NUM_XFERS) {
			struct scatterlist sg[4];
			unsigned int num_sg, len;
			bool output = !(xfers % 2);

			while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else if (!fast_vringh)
					assert(*dbuf == finished);
			/* Produce a buffer. */
			dbuf = data + (xfers % (RINGSIZE + 1));
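
			/* Cycle through four scatterlist layouts to exercise
			 * different descriptor chains. */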
			switch ((xfers / sizeof(*dbuf)) % 4) {
			case 0:
				/* Nasty three-element sg list. */
				sg_init_table(sg, num_sg = 3);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
				sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
				break;
			case 1:
				sg_init_table(sg, num_sg = 2);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
				break;
			case 2:
				sg_init_table(sg, num_sg = 1);
				sg_set_buf(&sg[0], (void *)dbuf, 4);
				break;
			case 3:
				sg_init_table(sg, num_sg = 4);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
				sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
				sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
				break;
			}
			/* May allocate an indirect, so force it to allocate user addr */
			__kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
			if (output)
				err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
							   GFP_KERNEL);
			else
				err = virtqueue_add_inbuf(vq, sg, num_sg,
							  dbuf, GFP_KERNEL);
			if (err == -ENOSPC) {
				if (!virtqueue_enable_cb_delayed(vq))

				/* Swallow all notifies at once. */
				if (read(to_guest[0], buf, sizeof(buf)) < 1)

				virtqueue_disable_cb(vq);

				errx(1, "virtqueue_add_in/outbuf: %i", err);
		while (finished != xfers) {
			dbuf = virtqueue_get_buf(vq, &len);
				assert(*dbuf == finished - 1);

			if (!virtqueue_enable_cb_delayed(vq))

			if (read(to_guest[0], buf, sizeof(buf)) < 1)

			virtqueue_disable_cb(vq);
431 printf("Guest: notified %lu, pinged %lu\n",
432 gvdev
.notifies
, receives
);
433 vring_del_virtqueue(vq
);
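
/*
 * Single-process test: drive the guest side (virtio_ring) and the host side
 * (vringh) in turn over one ring in ordinary malloc'd memory.
 */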
int main(int argc, char *argv[])
{
	struct virtio_device vdev;
	struct virtqueue *vq;
	struct scatterlist guest_sg[RINGSIZE], *sgs[2];
	struct iovec host_riov[2], host_wiov[2];
	struct vringh_iov riov, wiov;
	struct vring_used_elem used[RINGSIZE];
	bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
	bool fast_vringh = false, parallel = false;

	getrange = getrange_iov;
	INIT_LIST_HEAD(&vdev.vqs);
	if (strcmp(argv[1], "--indirect") == 0)
		__virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
	else if (strcmp(argv[1], "--eventidx") == 0)
		__virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
	else if (strcmp(argv[1], "--virtio-1") == 0)
		__virtio_set_bit(&vdev, VIRTIO_F_VERSION_1);
	else if (strcmp(argv[1], "--slow-range") == 0)
		getrange = getrange_slow;
	else if (strcmp(argv[1], "--fast-vringh") == 0)
		fast_vringh = true;
	else if (strcmp(argv[1], "--parallel") == 0)
		parallel = true;
	else
		errx(1, "Unknown arg %s", argv[1]);

	if (parallel)
		return parallel_test(vdev.features, getrange, fast_vringh);
	if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
		abort();
	__user_addr_max = __user_addr_min + USER_MEM;
	memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));
	/* Set up guest side. */
	vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false,
				 __user_addr_min,
				 never_notify_host, never_callback_guest,
				 "guest vq");
	/* Set up host side. */
	vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
	vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
			 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);
	/* No descriptor to get yet... */
	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		errx(1, "vringh_getdesc_user: %i", err);
	/* Guest puts in a descriptor. */
	memcpy(__user_addr_max - 1, "a", 1);
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	sg_init_table(guest_sg + 1, 1);
	sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
	sgs[0] = &guest_sg[0];
	sgs[1] = &guest_sg[1];
	/* May allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
		errx(1, "virtqueue_add_sgs: %i", err);
	__kmalloc_fake = NULL;
	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		errx(1, "vringh_getdesc_user: %i", err);
	assert(riov.used == 1);
	assert(riov.iov[0].iov_base == __user_addr_max - 1);
	assert(riov.iov[0].iov_len == 1);
	if (getrange != getrange_slow) {
		assert(wiov.used == 1);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 2);
	} else {
		assert(wiov.used == 2);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 1);
		assert(wiov.iov[1].iov_base == __user_addr_max - 2);
		assert(wiov.iov[1].iov_len == 1);
	}
	err = vringh_iov_pull_user(&riov, buf, 5);
		errx(1, "vringh_iov_pull_user: %i", err);
	assert(buf[0] == 'a');
	assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

	memcpy(buf, "bcdef", 5);
	err = vringh_iov_push_user(&wiov, buf, 5);
		errx(1, "vringh_iov_push_user: %i", err);
	assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
	assert(wiov.i == wiov.used);
	assert(vringh_iov_push_user(&wiov, buf, 5) == 0);
	err = vringh_complete_user(&vrh, head, err);
		errx(1, "vringh_complete_user: %i", err);

	/* Guest should see used token now. */
	__kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	__kfree_ignore_end = __kfree_ignore_start + 1;
	ret = virtqueue_get_buf(vq, &i);
		errx(1, "virtqueue_get_buf: %p", ret);
	/* Guest puts in a huge descriptor. */
	sg_init_table(guest_sg, RINGSIZE);
	for (i = 0; i < RINGSIZE; i++) {
		sg_set_buf(&guest_sg[i],
			   __user_addr_max - USER_MEM/4, USER_MEM/4);
	}

	/* Fill contents with recognisable garbage. */
	for (i = 0; i < USER_MEM/4; i++)
		((char *)__user_addr_max - USER_MEM/4)[i] = i;
	/* This will allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
		errx(1, "virtqueue_add_outbuf (large): %i", err);
	__kmalloc_fake = NULL;
	/* Host picks it up (allocates new iov). */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		errx(1, "vringh_getdesc_user: %i", err);
	assert(riov.max_num & VRINGH_IOV_ALLOCATED);
	assert(riov.iov != host_riov);
	if (getrange != getrange_slow)
		assert(riov.used == RINGSIZE);
	else
		assert(riov.used == RINGSIZE * USER_MEM/4);
	assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
	assert(wiov.used == 0);
	/* Pull data back out (in odd chunks), should be as expected. */
	for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
		err = vringh_iov_pull_user(&riov, buf, 3);
		if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
			errx(1, "vringh_iov_pull_user large: %i", err);
		assert(buf[0] == (char)i);
		assert(err < 2 || buf[1] == (char)(i + 1));
		assert(err < 3 || buf[2] == (char)(i + 2));
	}
	assert(riov.i == riov.used);
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);
	/* Complete using multi interface, just because we can. */
	err = vringh_complete_multi_user(&vrh, used, 1);
		errx(1, "vringh_complete_multi_user(1): %i", err);

	/* Free up those descriptors. */
	ret = virtqueue_get_buf(vq, &i);
		errx(1, "virtqueue_get_buf: %p", ret);
	/* Add lots of descriptors. */
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	for (i = 0; i < RINGSIZE; i++) {
		err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
			errx(1, "virtqueue_add_outbuf (multiple): %i", err);
	}
	/* Now get many, and consume them all at once. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	for (i = 0; i < RINGSIZE; i++) {
		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
			errx(1, "vringh_getdesc_user: %i", err);
	/* Make sure it wraps around ring, to test! */
	assert(vrh.vring.used->idx % RINGSIZE != 0);
	err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
		errx(1, "vringh_complete_multi_user: %i", err);
	/* Free those buffers. */
	for (i = 0; i < RINGSIZE; i++) {
		assert(virtqueue_get_buf(vq, &len) != NULL);
	/* Test weird (but legal!) indirect. */
	if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		char *data = __user_addr_max - USER_MEM/4;
		struct vring_desc *d = __user_addr_max - USER_MEM/2;

		/* Force creation of direct, which we modify. */
		__virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
					 false, __user_addr_min,
					 never_notify_host,
					 never_callback_guest,
					 "guest vq");
		sg_init_table(guest_sg, 4);
		sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
		sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
		sg_set_buf(&guest_sg[2], data + 6, 4);
		sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

		err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
			errx(1, "virtqueue_add_outbuf (indirect): %i", err);
		vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

		/* They're used in order, but double-check... */
		assert(vring.desc[0].addr == (unsigned long)d);
		assert(vring.desc[1].addr == (unsigned long)(d+2));
		assert(vring.desc[2].addr == (unsigned long)data + 6);
		assert(vring.desc[3].addr == (unsigned long)(d+3));
		vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[3].flags |= VRING_DESC_F_INDIRECT;
		d[0].addr = (unsigned long)data;
		d[0].flags = VRING_DESC_F_NEXT;
		d[1].addr = (unsigned long)data + 1;

		/* Second indirect */
		d[2].addr = (unsigned long)data + 3;

		d[3].addr = (unsigned long)data + 10;
		d[3].flags = VRING_DESC_F_NEXT;
		d[4].addr = (unsigned long)data + 15;
		d[4].flags = VRING_DESC_F_NEXT;
		d[5].addr = (unsigned long)data + 21;
		/* Host picks it up (allocates new iov). */
		vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
		vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
			errx(1, "vringh_getdesc_user: %i", err);
732 errx(1, "vringh_getdesc_user: head %i not 0", head
);
734 assert(riov
.max_num
& VRINGH_IOV_ALLOCATED
);
735 if (getrange
!= getrange_slow
)
736 assert(riov
.used
== 7);
738 assert(riov
.used
== 28);
		err = vringh_iov_pull_user(&riov, buf, 29);

		/* Data should be linear. */
		for (i = 0; i < err; i++)
			assert(buf[i] == i);
		vringh_iov_cleanup(&riov);
	/* Don't leak memory... */
	vring_del_virtqueue(vq);
	free(__user_addr_min);