/*
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

/* this code avoids GLib dependency */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>

#include "qemu/compiler.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "qemu/atomic.h"

#include "libvhost-user.h"
/* usually provided by GLib */
#define MIN(x, y) ({                            \
            typeof(x) _min1 = (x);              \
            typeof(y) _min2 = (y);              \
            (void) (&_min1 == &_min2);          \
            _min1 < _min2 ? _min1 : _min2; })

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)
static const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}
static void
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /* FIXME: find a way to call virtio_error? */
}
/* Translate guest physical address to our virtual address. */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    int i;

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
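/*
 * Example (a sketch, not part of the original file): a caller that needs a
 * guest buffer mapped should pass the wanted length in *plen and check how
 * much was actually translated, since a buffer may cross a region boundary:
 *
 *     uint64_t len = size;
 *     void *p = vu_gpa_to_va(dev, &len, gpa);
 *     if (!p || len < size) {
 *         // handle failed or partial translation
 *     }
 */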
/* Translate qemu virtual address to our virtual address. */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    int i;

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}
/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }

    close(ufd);
    return true;
#else
    return false;
#endif
}
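/*
 * vu_message_read()/vu_message_write() below exchange VhostUserMsg
 * structures on the vhost-user socket: a fixed-size header of
 * VHOST_USER_HDR_SIZE bytes followed by vmsg->size payload bytes, with any
 * file descriptors carried as SCM_RIGHTS ancillary data.
 */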
static bool
vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert(rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);
    return false;
}
static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}
static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return true;
    }

    if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
        return false;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        return false;
    }

    return msg_reply.payload.u64 == 0;
}
/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}
static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    atomic_or(&log_table[page / 8], 1 << (page % 8));
}
static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += VHOST_LOG_PAGE;
    }

    vu_log_kick(dev);
}
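/*
 * Worked example (illustration only): with VHOST_LOG_PAGE = 0x1000, a write
 * to guest physical address 0x5000 of length 1 covers dirty page 5, and
 * vu_log_page() sets bit (5 % 8) = 5 in byte (5 / 8) = 0 of the log table.
 */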
static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}
static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}
static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    int i;

    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
        dev->vq[i].enable = enabled;
    }
}
static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;

    if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}
static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}
static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}
static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}
static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory *memory = &vmsg->payload.memory;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use the offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        /* Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);
        close(vmsg->fds[i]);
    }

    /* Send the message back to qemu with the addresses filled in */
    vmsg->fd_num = 0;
    if (!vu_send_reply(dev, dev->sock, vmsg)) {
        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
        return false;
    }

    /* Wait for QEMU to confirm that it's registered the handler for the
     * faults.
     */
    if (!vu_message_read(dev, dev->sock, vmsg) ||
        vmsg->size != sizeof(vmsg->payload.u64) ||
        vmsg->payload.u64 != 0) {
        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
        return false;
    }

    /* OK, now we can go and register the memory and generate faults */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
#ifdef UFFDIO_REGISTER
        /* We should already have an open ufd. Mark each memory
         * range as ufd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /* Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /* Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        struct uffdio_register reg_struct;
        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %llx + %llx\n",
               __func__, i, reg_struct.range.start, reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return false;
}
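/*
 * In summary, the postcopy path above maps each region PROT_NONE, reports
 * the mmap'd addresses back to QEMU, waits for QEMU to acknowledge that its
 * userfault handler is registered, registers each range with
 * UFFDIO_REGISTER, and only then makes the regions readable and writable.
 */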
static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory *memory = &vmsg->payload.memory;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;

        if (m) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = memory->nregions;

    if (dev->postcopy_listening) {
        return vu_set_mem_table_exec_postcopy(dev, vmsg);
    }

    DPRINT("Nregions: %d\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use the offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        close(vmsg->fds[i]);
    }

    return false;
}
static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}
static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].vring.num = num;

    return false;
}
static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr *vra = &vmsg->payload.addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016llx\n", vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016llx\n", vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016llx\n", vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016llx\n", vra->log_guest_addr);

    vq->vring.flags = vra->flags;
    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
    vq->vring.log_guest_addr = vra->log_guest_addr;

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = vq->vring.used->idx;

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
    }

    return false;
}
static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %d\n", index);
    DPRINT("State.num:   %d\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}
static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %d\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}
static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
        vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}
static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
        dev->vq[index].kick_fd = vmsg->fds[0];
        DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
    }

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    return false;
}
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if ((dev->protocol_features & VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) == 0) {
        return false;
    }

    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
        return false;
    }

    return vu_process_message_reply(dev, &vmsg);
}
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
        dev->vq[index].call_fd = vmsg->fds[0];
    }

    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);

    return false;
}
static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
        dev->vq[index].err_fd = vmsg->fds[0];
    }

    return false;
}
static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg->payload.u64 = features;
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}
static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}
static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("Function %s() not implemented yet.\n", __func__);
    return false;
}
static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %d\n", index);
    DPRINT("State.enable: %d\n", enable);

    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}
static bool
vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
        return false;
    }

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
    }
    dev->slave_fd = vmsg->fds[0];
    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to master */
        vmsg->size = 0;
    }

    return true;
}
static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}
static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
    dev->postcopy_ufd = -1;
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    vmsg->size = 0;
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}
static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 = -1;
    vmsg->size = sizeof(vmsg->payload.u64);

    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        return true;
    }
    dev->postcopy_listening = true;

    vmsg->flags = VHOST_USER_VERSION | VHOST_USER_REPLY_MASK;
    vmsg->payload.u64 = 0; /* Success */
    return true;
}
static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg->fd_num = 0;
    vmsg->payload.u64 = 0;
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->flags = VHOST_USER_VERSION | VHOST_USER_REPLY_MASK;
    DPRINT("%s: exit\n", __func__);
    return true;
}
static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %d\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_SLAVE_REQ_FD:
        return vu_set_slave_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        break;
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}
bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool success = false;

    if (!vu_message_read(dev, dev->sock, &vmsg)) {
        goto end;
    }

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}
void
vu_deinit(VuDev *dev)
{
    int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;
        if (m != MAP_FAILED) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = 0;

    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }
    }

    vu_close_log(dev);

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
        dev->slave_fd = -1;
    }

    if (dev->sock != -1) {
        close(dev->sock);
    }
}
void
vu_init(VuDev *dev,
        int socket,
        vu_panic_cb panic,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    int i;

    assert(socket >= 0);
    assert(remove_watch);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    dev->slave_fd = -1;
    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }
}
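/*
 * Usage sketch (illustration only, not part of the original file): a backend
 * typically accepts the vhost-user socket from QEMU, initializes the device
 * and then calls vu_dispatch() whenever the socket becomes readable, e.g.:
 *
 *     vu_init(&dev, conn_fd, my_panic, my_set_watch, my_remove_watch, &iface);
 *     while (vu_dispatch(&dev)) {
 *         // wait for conn_fd (and any watched kick fds) to become readable
 *     }
 *     vu_deinit(&dev);
 *
 * Here my_panic, my_set_watch, my_remove_watch and iface are
 * application-provided callbacks, named only for illustration.
 */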
VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
    return &dev->vq[qidx];
}
bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return vq->vring.avail->flags;
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = vq->vring.avail->idx;

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return vq->vring.avail->ring[i];
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", head);
        return false;
    }

    return true;
}
static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len = len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    ori_desc = vu_gpa_to_va(dev, &read_len, addr);
    if (!ori_desc) {
        return -1;
    }

    memcpy(desc, ori_desc, read_len);

    return 0;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc[i].next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
            if (desc[i].len % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = desc[i].addr;
            desc_len = desc[i].len;
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr, desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (desc[i].flags & VRING_DESC_F_WRITE) {
                in_total += desc[i].len;
            } else {
                out_total += desc[i].len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    assert(fbit < 64);
    return !!(features & (1ULL << fbit));
}

static inline
bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}
static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
void
vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}
static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags |= mask;
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags &= ~mask;
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    if (!vq->notification) {
        return;
    }

    *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
}
void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
static void
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
}
/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
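/* For instance, ALIGN_DOWN(13, 8) == 8 and ALIGN_UP(13, 8) == 16. */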
static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    unsigned int i, head, max, desc_len;
    uint64_t desc_addr, read_len;
    VuVirtqElement *elem;
    unsigned out_num, in_num;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    struct vring_desc *desc;
    int rc;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;
    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;
    desc = vq->vring.desc;
    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
        }

        /* loop over the indirect descriptor table */
        desc_addr = desc[i].addr;
        desc_len = desc[i].len;
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr, desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (desc[i].flags & VRING_DESC_F_WRITE) {
            virtqueue_map_desc(dev, &in_num, iov + out_num,
                               VIRTQUEUE_MAX_SIZE - out_num, true,
                               desc[i].addr, desc[i].len);
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            virtqueue_map_desc(dev, &out_num, iov,
                               VIRTQUEUE_MAX_SIZE, false,
                               desc[i].addr, desc[i].len);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    return elem;
}
bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}
static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
        if (desc[i].len % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
        }

        /* loop over the indirect descriptor table */
        desc_addr = desc[i].addr;
        desc_len = desc[i].len;
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr, desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
        }

        if (desc[i].flags & VRING_DESC_F_WRITE) {
            min = MIN(desc[i].len, len);
            vu_log_write(dev, desc[i].addr, min);
            len -= min;
        }
    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}
void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(dev, vq, &uelem, idx);
}
static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = val;
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}
void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_flush(dev, vq, 1);
}
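/*
 * Processing-loop sketch (illustration only, not part of the original file):
 * a queue handler registered via vu_set_queue_handler() usually drains the
 * ring with vu_queue_pop(), consumes or fills the element's out_sg/in_sg
 * iovecs, and completes it with vu_queue_push() and vu_queue_notify():
 *
 *     static void my_queue_handler(VuDev *dev, int qidx)   // hypothetical
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             // process elem->out_sg / elem->in_sg here
 *             vu_queue_push(dev, vq, elem, 0);
 *             free(elem);
 *         }
 *         vu_queue_notify(dev, vq);
 *     }
 */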