/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif

#define VHOST_MEMORY_BASELINE_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS     8

/*
 * Set maximum number of RAM slots supported to
 * the maximum number supported by the target.
 */
#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
    defined(TARGET_ARM) || defined(TARGET_ARM_64)
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS

#elif defined(TARGET_PPC) || defined(TARGET_PPC_64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

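/*
 * Protocol feature negotiation, for reference: when the backend advertises
 * VHOST_USER_F_PROTOCOL_FEATURES in its device feature bits, QEMU fetches
 * the protocol feature set with VHOST_USER_GET_PROTOCOL_FEATURES, masks it
 * with VHOST_USER_PROTOCOL_FEATURE_MASK above, and acknowledges the result
 * with VHOST_USER_SET_PROTOCOL_FEATURES (see vhost_user_backend_init()
 * below).
 */
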
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64

typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;

typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserMemRegMsg mem_reg;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

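/*
 * Wire format, for reference: every message starts with the packed
 * VhostUserHeader (request, flags, size) followed by msg.hdr.size bytes
 * of payload. File descriptors, when present, travel as SCM_RIGHTS
 * ancillary data on the same unix socket rather than inside the payload.
 */
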
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *slave_ioc;
    GSource *slave_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};

struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};

static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}

static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return -1;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -1;
    }

    return 0;
}

struct vhost_user_read_cb_data {
    struct vhost_dev *dev;
    VhostUserMsg *msg;
    GMainLoop *loop;
    int ret;
};

static gboolean vhost_user_read_cb(GIOChannel *source, GIOCondition condition,
                                   gpointer opaque)
{
    struct vhost_user_read_cb_data *data = opaque;
    struct vhost_dev *dev = data->dev;
    VhostUserMsg *msg = data->msg;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    if (vhost_user_read_header(dev, msg) < 0) {
        data->ret = -1;
        goto end;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        data->ret = -1;
        goto end;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            data->ret = -1;
            goto end;
        }
    }

end:
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}

static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque);

/*
 * This updates the read handler to use a new event loop context.
 * Event sources are removed from the previous context: this ensures
 * that events detected in the previous context are purged. They will
 * be re-detected and processed in the new context.
 */
static void slave_update_read_handler(struct vhost_dev *dev,
                                      GMainContext *ctxt)
{
    struct vhost_user *u = dev->opaque;

    if (!u->slave_ioc) {
        return;
    }

    if (u->slave_src) {
        g_source_destroy(u->slave_src);
        g_source_unref(u->slave_src);
    }

    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
                                                G_IO_IN | G_IO_HUP,
                                                slave_read, dev, NULL,
                                                ctxt);
}

static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    GMainContext *prev_ctxt = chr->chr->gcontext;
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    struct vhost_user_read_cb_data data = {
        .dev = dev,
        .loop = loop,
        .msg = msg,
        .ret = 0
    };

    /*
     * We want to be able to monitor the slave channel fd while waiting
     * for chr I/O. This requires an event loop, but we can't nest the
     * one to which chr is currently attached: its fd handlers might not
     * be prepared for re-entrancy. So we create a new one and switch chr
     * to use it.
     */
    slave_update_read_handler(dev, ctxt);
    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);

    g_main_loop_run(loop);

    /*
     * Restore the previous event loop context. This also destroys/recreates
     * event sources: this guarantees that all pending events in the original
     * context that have been processed by the nested loop are purged.
     */
    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
    slave_update_read_handler(dev, NULL);

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);

    return data.ret;
}

static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    if (vhost_user_read(dev, &msg_reply) < 0) {
        return -1;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d, received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -1;
    }

    return msg_reply.payload.u64 ? -1 : 0;
}

static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}

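/*
 * The requests above configure device-wide state shared by all virtqueues
 * of a multiqueue device, so they only need to go out once;
 * vhost_user_write() below silently drops them for every vhost_dev except
 * the one handling vq index 0.
 */
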
/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once, the first time. Later such
     * requests are simply ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -1;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return -1;
    }

    return 0;
}

int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        if (vhost_user_read(dev, &msg) < 0) {
            return -1;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d, received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -1;
        }
    }

    return 0;
}

static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);

    return mr;
}

static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}

static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -1;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -1;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}

static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}

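/*
 * Overview of the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS path, for
 * reference: instead of resending the full memory table, QEMU keeps a
 * shadow copy of the regions last sent to the backend.
 * scrub_shadow_regions() diffs the shadow table against the device's
 * current memory map, send_remove_regions() issues VHOST_USER_REM_MEM_REG
 * for stale entries and send_add_regions() issues VHOST_USER_ADD_MEM_REG
 * for new ones, keeping the shadow table in sync as it goes.
 */
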
static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;
}

static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}

static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    ram_addr_t offset;
    MemoryRegion *mr;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                if (vhost_user_read(dev, &msg_reply) < 0) {
                    return -1;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type. "
                                 "Expected %d, received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -1;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -1;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64
                                 ", expected %" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -1;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}

static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                reply_supported) < 0)
    {
        goto err;
    }

    if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
                shadow_pcb, reply_supported, track_ramblocks) < 0)
    {
        goto err;
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        if (vhost_user_write(dev, msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return -1;
}

static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          true) < 0) {
            return -1;
        }
    } else {
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              true) < 0) {
            return -1;
        }

        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (vhost_user_read(dev, &msg_reply) < 0) {
            return -1;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type. "
                         "Expected %d, received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -1;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -1;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fd's.
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -1;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;
}

static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * version.
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          false) < 0) {
            return -1;
        }
    } else {
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              false) < 0) {
            return -1;
        }
        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}

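/*
 * To summarize the two paths above: without configurable memory slots the
 * whole table is sent in a single VHOST_USER_SET_MEM_TABLE message (limited
 * to VHOST_MEMORY_BASELINE_NREGIONS fd-backed regions), while the
 * CONFIGURE_MEM_SLOTS path sends incremental ADD/REM_MEM_REG updates and
 * can address up to VHOST_USER_MAX_RAM_SLOTS regions.
 */
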
static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}

static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
                                             int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && !n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
        n->set = true;
    }
}

static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
                                            int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        n->set = false;
    }
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    vhost_user_host_notifier_restore(dev, ring->index);

    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -1;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num   = enable,
        };

        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
    }

    return 0;
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    vhost_user_host_notifier_remove(dev, ring->index);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}

static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

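/*
 * Note on the NOFD mask, for reference: per the vhost-user specification,
 * bits 0-7 of the payload carry the vring index and bit 8
 * (VHOST_USER_VRING_NOFD_MASK) is set when no file descriptor accompanies
 * the message, telling the backend to poll the ring instead of waiting for
 * a kick/call eventfd.
 */
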
static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
    int ret = -1;

    if (!dev->config_ops) {
        return -1;
    }

    if (dev->config_ops->vhost_dev_config_notifier) {
        ret = dev->config_ops->vhost_dev_config_notifier(dev);
    }

    return ret;
}

static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size;
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -1;
    }

    n = &user->notifier[queue_idx];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -1;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -1;
    }

    n->addr = addr;
    n->set = true;

    return 0;
}

static void close_slave_channel(struct vhost_user *u)
{
    g_source_destroy(u->slave_src);
    g_source_unref(u->slave_src);
    u->slave_src = NULL;
    object_unref(OBJECT(u->slave_ioc));
    u->slave_ioc = NULL;
}

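/*
 * The slave channel watched by slave_read() below is a socketpair private
 * to QEMU and the backend: QEMU keeps one end wrapped in a QIOChannel,
 * while the other end is handed to the backend in the
 * VHOST_USER_SET_SLAVE_REQ_FD message (see vhost_setup_slave_channel())
 * so it can send requests of its own: IOTLB misses, config change
 * notifications, and host notifier setup.
 */
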
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    Error *local_err = NULL;
    gboolean rc = G_SOURCE_CONTINUE;
    int ret = 0;
    struct iovec iov;
    g_autofree int *fd = NULL;
    size_t fdsize = 0;
    int i;

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd ? fd[0] : -1);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];

        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
            error_report_err(local_err);
            goto err;
        }
    }

    goto fdcleanup;

err:
    close_slave_channel(u);
    rc = G_SOURCE_REMOVE;

fdcleanup:
    if (fd) {
        for (i = 0; i < fdsize; i++) {
            close(fd[i]);
        }
    }
    return rc;
}

static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    Error *local_err = NULL;
    QIOChannel *ioc;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
    if (!ioc) {
        error_report_err(local_err);
        return -1;
    }
    u->slave_ioc = ioc;
    slave_update_read_handler(dev, NULL);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        close_slave_channel(u);
    }

    return ret;
}

#ifdef CONFIG_LINUX
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}

static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                     uint64_t offset)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    int i;

    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);

    if (!u) {
        return 0;
    }
    /* Translate the offset into an address in the client's address space */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        if (u->region_rb[i] == rb &&
            offset >= u->region_rb_offset[i] &&
            offset < (u->region_rb_offset[i] +
                      dev->mem->regions[i].memory_size)) {
            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
                                   u->postcopy_client_bases[i];
            trace_vhost_user_postcopy_waker_found(client_addr);
            return postcopy_wake_shared(pcfd, client_addr, rb);
        }
    }

    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
    return 0;
}
#endif

/*
 * Called at the start of an inbound postcopy on reception of the
 * 'advise' command.
 */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                   VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -1;
    }
    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -1;
    }
    qemu_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -1;
#endif
}

/*
 * Called at the switch to postcopy on reception of the 'listen' command.
 */
static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
{
    struct vhost_user *u = dev->opaque;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    u->postcopy_listen = true;
    trace_vhost_user_postcopy_listen();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_listen to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_listen");
        return ret;
    }

    return 0;
}

/*
 * Called at the end of postcopy
 */
static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_END,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    int ret;
    struct vhost_user *u = dev->opaque;

    trace_vhost_user_postcopy_end_entry();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_end to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_end");
        return ret;
    }
    postcopy_unregister_shared_ufd(&u->postcopy_fd);
    close(u->postcopy_fd.fd);
    u->postcopy_fd.handler = NULL;

    trace_vhost_user_postcopy_end_exit();

    return 0;
}

static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
                                        void *opaque)
{
    struct PostcopyNotifyData *pnd = opaque;
    struct vhost_user *u = container_of(notifier, struct vhost_user,
                                        postcopy_notifier);
    struct vhost_dev *dev = u->dev;

    switch (pnd->reason) {
    case POSTCOPY_NOTIFY_PROBE:
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
            /* TODO: Get the device name into this error somehow */
            error_setg(pnd->errp,
                       "vhost-user backend not capable of postcopy");
            return -ENOENT;
        }
        break;

    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
        return vhost_user_postcopy_advise(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
        return vhost_user_postcopy_listen(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_END:
        return vhost_user_postcopy_end(dev, pnd->errp);

    default:
        /* We ignore notifications we don't know */
        break;
    }

    return 0;
}

static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
                                   Error **errp)
{
    uint64_t features, protocol_features, ram_slots;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = opaque;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return -EPROTO;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return -EPROTO;
        }

        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
            /* Don't acknowledge CONFIG feature if device doesn't support it */
            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
        } else if (!(protocol_features &
                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
            error_setg(errp, "Device expects VHOST_USER_PROTOCOL_F_CONFIG "
                       "but backend does not support it.");
            return -EINVAL;
        }

        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return -EPROTO;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return -EPROTO;
            }
        } else {
            dev->max_queues = 1;
        }

        if (dev->num_queues && dev->max_queues < dev->num_queues) {
            error_setg(errp, "The maximum number of queues supported by the "
                       "backend is %" PRIu64, dev->max_queues);
            return -EINVAL;
        }

        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                 virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_setg(errp, "IOMMU support requires reply-ack and "
                       "slave-req protocol features.");
            return -EINVAL;
        }

        /* get max memory regions if backend supports configurable RAM slots */
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
            u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
        } else {
            err = vhost_user_get_max_memslots(dev, &ram_slots);
            if (err < 0) {
                return -EPROTO;
            }

            if (ram_slots < u->user->memory_slots) {
                error_setg(errp, "The backend specified a max ram slots limit "
                           "of %" PRIu64", when the prior validated limit was "
                           "%d. This limit should never decrease.", ram_slots,
                           u->user->memory_slots);
                return -EINVAL;
            }

            u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    if (dev->vq_index == 0) {
        err = vhost_setup_slave_channel(dev);
        if (err < 0) {
            return -EPROTO;
        }
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}

static int vhost_user_backend_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->postcopy_notifier.notify) {
        postcopy_remove_notifier(&u->postcopy_notifier);
        u->postcopy_notifier.notify = NULL;
    }
    u->postcopy_listen = false;
    if (u->postcopy_fd.handler) {
        postcopy_unregister_shared_ufd(&u->postcopy_fd);
        close(u->postcopy_fd.fd);
        u->postcopy_fd.handler = NULL;
    }
    if (u->slave_ioc) {
        close_slave_channel(u);
    }
    g_free(u->region_rb);
    u->region_rb = NULL;
    g_free(u->region_rb_offset);
    u->region_rb_offset = NULL;
    u->region_rb_len = 0;
    g_free(u);
    dev->opaque = 0;

    return 0;
}

static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}

static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    struct vhost_user *u = dev->opaque;

    return u->user->memory_slots;
}

static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}

static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
    VhostUserMsg msg = { };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.hdr.request = VHOST_USER_SEND_RARP;
        msg.hdr.flags = VHOST_USER_VERSION;
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.hdr.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -1;
}

static bool vhost_user_can_merge(struct vhost_dev *dev,
                                 uint64_t start1, uint64_t size1,
                                 uint64_t start2, uint64_t size2)
{
    ram_addr_t offset;
    int mfd, rfd;

    (void)vhost_user_get_mr_data(start1, &offset, &mfd);
    (void)vhost_user_get_mr_data(start2, &offset, &rfd);

    return mfd == rfd;
}

static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
    VhostUserMsg msg;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
        return 0;
    }

    msg.hdr.request = VHOST_USER_NET_SET_MTU;
    msg.payload.u64 = mtu;
    msg.hdr.size = sizeof(msg.payload.u64);
    msg.hdr.flags = VHOST_USER_VERSION;
    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    /* If reply_ack supported, slave has to ack specified MTU is valid */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
                                            struct vhost_iotlb_msg *imsg)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_IOTLB_MSG,
        .hdr.size = sizeof(msg.payload.iotlb),
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .payload.iotlb = *imsg,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -EFAULT;
    }

    return process_message_reply(dev, &msg);
}

static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}

static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
        return -EINVAL;
    }

    assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "vhost_get_config failed");
        return -EPROTO;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_setg(errp, "vhost_get_config failed");
        return -EPROTO;
    }

    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_setg(errp,
                   "Received unexpected msg type. Expected %d received %d",
                   VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -EINVAL;
    }

    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_setg(errp, "Received bad msg size.");
        return -EINVAL;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}

static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size, uint32_t flags)
{
    uint8_t *p;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = offset;
    msg.payload.config.size = size;
    msg.payload.config.flags = flags;
    p = msg.payload.config.region;
    memcpy(p, data, size);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    memcpy(&msg.payload.session.session_setup_data, sess_info,
           sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() returned -1, create session failed");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_report("vhost_user_read() returned -1, create session failed");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                     msg.payload.session.session_id);
        return -1;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}

static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.u64),
    };
    msg.payload.u64 = session_id;

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() returned -1, close session failed");
        return -1;
    }

    return 0;
}

static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
                                          MemoryRegionSection *section)
{
    bool result;

    result = memory_region_get_fd(section->mr) >= 0;

    return result;
}

static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d, received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -1;
    }

    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -1;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -1;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}

static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
                                      struct vhost_inflight *inflight)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.mmap_size = inflight->size,
        .payload.inflight.mmap_offset = inflight->offset,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = inflight->queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
        return -1;
    }

    return 0;
}

bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
{
    if (user->chr) {
        error_setg(errp, "Cannot initialize vhost-user state");
        return false;
    }
    user->chr = chr;
    user->memory_slots = 0;
    return true;
}

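/*
 * A minimal usage sketch (hypothetical caller, error handling elided):
 * device code typically embeds a VhostUserState shared by all vhost_dev
 * instances of one virtio device, initializes it once against the chardev,
 * and passes it as the opaque pointer for VHOST_BACKEND_TYPE_USER so that
 * vhost_user_backend_init() above can find the CharBackend:
 *
 *     if (!vhost_user_init(user, &s->chr, errp)) {
 *         return;
 *     }
 *     ...   // vhost_dev_init(..., user, VHOST_BACKEND_TYPE_USER, ...)
 *     vhost_user_cleanup(user);   // on unrealize
 */
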
void vhost_user_cleanup(VhostUserState *user)
{
    int i;

    if (!user->chr) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (user->notifier[i].addr) {
            object_unparent(OBJECT(&user->notifier[i].mr));
            munmap(user->notifier[i].addr, qemu_real_host_page_size);
            user->notifier[i].addr = NULL;
        }
    }
    user->chr = NULL;
}

const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};