/*
 * virtio-mem device
 *
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "sysemu/numa.h"
#include "sysemu/sysemu.h"
#include "sysemu/reset.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-mem.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "exec/ram_addr.h"
#include "migration/misc.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include CONFIG_DEVICES
#include "trace.h"
/*
 * Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking
 * bitmap small.
 */
#define VIRTIO_MEM_MIN_BLOCK_SIZE ((uint32_t)(1 * MiB))
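/*
 * For illustration: with the 1 MiB minimum block size, a 1 TiB memory
 * backend is tracked by 1 Mi bits -- a 128 KiB bitmap -- since the bitmap
 * allocated below holds exactly one bit per device block.
 */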
#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc64__)
#define VIRTIO_MEM_DEFAULT_THP_SIZE ((uint32_t)(2 * MiB))
#else
/* fall back to 1 MiB (e.g., the THP size on s390x) */
#define VIRTIO_MEM_DEFAULT_THP_SIZE VIRTIO_MEM_MIN_BLOCK_SIZE
#endif
/*
 * We want to have a reasonable default block size such that
 * 1. We avoid splitting THPs when unplugging memory, which degrades
 *    performance.
 * 2. We avoid placing THPs for plugged blocks that also cover unplugged
 *    blocks.
 *
 * The actual THP size might differ between Linux kernels, so we try to probe
 * it. In the future (if we ever run into issues regarding 2.), we might want
 * to disable THP in case we fail to properly probe the THP size, or if the
 * block size is configured smaller than the THP size.
 */
static uint32_t thp_size;
#define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
static uint32_t virtio_mem_thp_size(void)
{
    gchar *content = NULL;
    const char *endptr;
    uint64_t tmp;

    if (thp_size) {
        return thp_size;
    }

    /*
     * Try to probe the actual THP size, falling back to (sane but possibly
     * incorrect) default sizes.
     */
    if (g_file_get_contents(HPAGE_PMD_SIZE_PATH, &content, NULL, NULL) &&
        !qemu_strtou64(content, &endptr, 0, &tmp) &&
        (!endptr || *endptr == '\n')) {
        /*
         * Sanity-check the value: if it's too big (e.g., aarch64 with 64k base
         * pages) or weird, fall back to something smaller.
         */
        if (!tmp || !is_power_of_2(tmp) || tmp > 16 * MiB) {
            warn_report("Read unsupported THP size: %" PRIx64, tmp);
        } else {
            thp_size = tmp;
        }
    }

    if (!thp_size) {
        thp_size = VIRTIO_MEM_DEFAULT_THP_SIZE;
        warn_report("Could not detect THP size, falling back to %" PRIx64
                    " MiB.", thp_size / MiB);
    }

    g_free(content);
    return thp_size;
}
static uint64_t virtio_mem_default_block_size(RAMBlock *rb)
{
    const uint64_t page_size = qemu_ram_pagesize(rb);

    /* We can have hugetlbfs with a page size smaller than the THP size. */
    if (page_size == qemu_real_host_page_size) {
        return MAX(page_size, virtio_mem_thp_size());
    }
    return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE);
}
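/*
 * Examples for the default: an anonymous/shmem backend using ordinary host
 * pages results in the probed THP size (e.g., 2 MiB on x86-64); a hugetlbfs
 * backend with 1 GiB pages results in a 1 GiB default block size, as the
 * device can only (un)plug in multiples of the backend page size.
 */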
/*
 * Size the usable region bigger than the requested size if possible. Esp.
 * Linux guests will only add (aligned) memory blocks in case they fully
 * fit into the usable region, but plug+online only a subset of the pages.
 * The memory block size corresponds mostly to the section size.
 *
 * This allows e.g., to add 20MB with a section size of 128MB on x86_64, and
 * a section size of 1GB on arm64 (as long as the start address is properly
 * aligned, similar to ordinary DIMMs).
 *
 * We can change this at any time and maybe even make it configurable if
 * necessary (as the section size can change). But it's more likely that the
 * section size will rather get smaller and not bigger over time.
 */
#if defined(TARGET_X86_64) || defined(TARGET_I386)
#define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB))
#else
#error VIRTIO_MEM_USABLE_EXTENT not defined
#endif
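/*
 * Worked example (x86-64): with requested-size = 20 MiB, the usable region
 * spans 20 MiB + 256 MiB (capped at the memdev size), so a suitably aligned
 * Linux guest can add two full 128 MiB memory blocks while only plugging
 * 20 MiB of them.
 */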
static bool virtio_mem_is_busy(void)
{
    /*
     * Postcopy cannot handle concurrent discards and we don't want to migrate
     * pages on-demand with stale content when plugging new blocks.
     *
     * For precopy, we don't want unplugged blocks in our migration stream, and
     * when plugging new blocks, the page content might differ between source
     * and destination (observable by the guest when not initializing pages
     * after plugging them) until we're running on the destination (as we didn't
     * migrate these blocks when they were unplugged).
     */
    return migration_in_incoming_postcopy() || !migration_is_idle();
}
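/*
 * The iterator below walks all unplugged ranges: it invokes the callback for
 * each maximal run of clear bits in the plugged-block bitmap, translated
 * into an offset/size pair relative to the memory region. Iteration stops
 * on the first non-zero return value.
 */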
typedef int (*virtio_mem_range_cb)(const VirtIOMEM *vmem, void *arg,
                                   uint64_t offset, uint64_t size);

static int virtio_mem_for_each_unplugged_range(const VirtIOMEM *vmem, void *arg,
                                               virtio_mem_range_cb cb)
{
    unsigned long first_zero_bit, last_zero_bit;
    uint64_t offset, size;
    int ret = 0;

    first_zero_bit = find_first_zero_bit(vmem->bitmap, vmem->bitmap_size);
    while (first_zero_bit < vmem->bitmap_size) {
        offset = first_zero_bit * vmem->block_size;
        last_zero_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size,
                                      first_zero_bit + 1) - 1;
        size = (last_zero_bit - first_zero_bit + 1) * vmem->block_size;

        ret = cb(vmem, arg, offset, size);
        if (ret) {
            break;
        }
        first_zero_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size,
                                            last_zero_bit + 2);
    }
    return ret;
}
/*
 * Adjust the memory section to cover the intersection with the given range.
 *
 * Returns false if the intersection is empty, otherwise returns true.
 */
static bool virtio_mem_intersect_memory_section(MemoryRegionSection *s,
                                                uint64_t offset, uint64_t size)
{
    uint64_t start = MAX(s->offset_within_region, offset);
    uint64_t end = MIN(s->offset_within_region + int128_get64(s->size),
                       offset + size);

    if (end <= start) {
        return false;
    }

    s->offset_within_address_space += start - s->offset_within_region;
    s->offset_within_region = start;
    s->size = int128_make64(end - start);
    return true;
}
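/*
 * The iterator below walks all plugged ranges that intersect the given
 * memory section: each maximal run of set bits is clipped against the
 * section and handed to the callback. Iteration stops early once a run no
 * longer intersects the section or the callback returns a non-zero value.
 */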
typedef int (*virtio_mem_section_cb)(MemoryRegionSection *s, void *arg);

static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
                                               MemoryRegionSection *s,
                                               void *arg,
                                               virtio_mem_section_cb cb)
{
    unsigned long first_bit, last_bit;
    uint64_t offset, size;
    int ret = 0;

    first_bit = s->offset_within_region / vmem->block_size;
    first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
    while (first_bit < vmem->bitmap_size) {
        MemoryRegionSection tmp = *s;

        offset = first_bit * vmem->block_size;
        last_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size,
                                      first_bit + 1) - 1;
        size = (last_bit - first_bit + 1) * vmem->block_size;

        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
            break;
        }
        ret = cb(&tmp, arg);
        if (ret) {
            break;
        }
        first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size,
                                  last_bit + 2);
    }

    return ret;
}
static int virtio_mem_notify_populate_cb(MemoryRegionSection *s, void *arg)
{
    RamDiscardListener *rdl = arg;

    return rdl->notify_populate(rdl, s);
}

static int virtio_mem_notify_discard_cb(MemoryRegionSection *s, void *arg)
{
    RamDiscardListener *rdl = arg;

    rdl->notify_discard(rdl, s);
    return 0;
}
static void virtio_mem_notify_unplug(VirtIOMEM *vmem, uint64_t offset,
                                     uint64_t size)
{
    RamDiscardListener *rdl;

    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
        MemoryRegionSection tmp = *rdl->section;

        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
            continue;
        }
        rdl->notify_discard(rdl, &tmp);
    }
}
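/*
 * Notify all listeners about a plugged range. Unlike unplug notifications,
 * plugging may fail (e.g., a listener such as VFIO might be unable to
 * populate the range); in that case, already-notified listeners get a
 * discard notification again, rolling the operation back.
 */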
static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
                                  uint64_t size)
{
    RamDiscardListener *rdl, *rdl2;
    int ret = 0;

    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
        MemoryRegionSection tmp = *rdl->section;

        if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
            continue;
        }
        ret = rdl->notify_populate(rdl, &tmp);
        if (ret) {
            break;
        }
    }

    if (ret) {
        /* Notify all already-notified listeners. */
        QLIST_FOREACH(rdl2, &vmem->rdl_list, next) {
            MemoryRegionSection tmp = *rdl2->section;

            if (rdl2 == rdl) {
                break;
            }
            if (!virtio_mem_intersect_memory_section(&tmp, offset, size)) {
                continue;
            }
            rdl2->notify_discard(rdl2, &tmp);
        }
    }
    return ret;
}
static void virtio_mem_notify_unplug_all(VirtIOMEM *vmem)
{
    RamDiscardListener *rdl;

    if (!vmem->size) {
        return;
    }

    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
        if (rdl->double_discard_supported) {
            rdl->notify_discard(rdl, rdl->section);
        } else {
            virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
                                                virtio_mem_notify_discard_cb);
        }
    }
}
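/*
 * Test whether all blocks in the given GPA range are in the requested
 * state: returns true if every block is plugged (plugged == true) or every
 * block is unplugged (plugged == false), and false on any mismatch.
 */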
static bool virtio_mem_test_bitmap(const VirtIOMEM *vmem, uint64_t start_gpa,
                                   uint64_t size, bool plugged)
{
    const unsigned long first_bit = (start_gpa - vmem->addr) / vmem->block_size;
    const unsigned long last_bit = first_bit + (size / vmem->block_size) - 1;
    unsigned long found_bit;

    /* We fake a shorter bitmap to avoid searching too far. */
    if (plugged) {
        found_bit = find_next_zero_bit(vmem->bitmap, last_bit + 1, first_bit);
    } else {
        found_bit = find_next_bit(vmem->bitmap, last_bit + 1, first_bit);
    }
    return found_bit > last_bit;
}
static void virtio_mem_set_bitmap(VirtIOMEM *vmem, uint64_t start_gpa,
                                  uint64_t size, bool plugged)
{
    const unsigned long bit = (start_gpa - vmem->addr) / vmem->block_size;
    const unsigned long nbits = size / vmem->block_size;

    if (plugged) {
        bitmap_set(vmem->bitmap, bit, nbits);
    } else {
        bitmap_clear(vmem->bitmap, bit, nbits);
    }
}
static void virtio_mem_send_response(VirtIOMEM *vmem, VirtQueueElement *elem,
                                     struct virtio_mem_resp *resp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vmem);
    VirtQueue *vq = vmem->vq;

    trace_virtio_mem_send_response(le16_to_cpu(resp->type));
    iov_from_buf(elem->in_sg, elem->in_num, 0, resp, sizeof(*resp));

    virtqueue_push(vq, elem, sizeof(*resp));
    virtio_notify(vdev, vq);
}
static void virtio_mem_send_response_simple(VirtIOMEM *vmem,
                                            VirtQueueElement *elem,
                                            uint16_t type)
{
    struct virtio_mem_resp resp = {
        .type = cpu_to_le16(type),
    };

    virtio_mem_send_response(vmem, elem, &resp);
}
static bool virtio_mem_valid_range(const VirtIOMEM *vmem, uint64_t gpa,
                                   uint64_t size)
{
    if (!QEMU_IS_ALIGNED(gpa, vmem->block_size)) {
        return false;
    }
    if (gpa + size < gpa || !size) {
        return false;
    }
    if (gpa < vmem->addr || gpa >= vmem->addr + vmem->usable_region_size) {
        return false;
    }
    if (gpa + size > vmem->addr + vmem->usable_region_size) {
        return false;
    }
    return true;
}
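/*
 * Central path for (un)plugging blocks: unplugging discards the backing
 * pages first and then notifies listeners; plugging notifies listeners
 * and, if any of them fails, discards the range again so no partially
 * populated state is left behind. The bitmap is only updated on success.
 */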
static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
                                      uint64_t size, bool plug)
{
    const uint64_t offset = start_gpa - vmem->addr;
    RAMBlock *rb = vmem->memdev->mr.ram_block;

    if (virtio_mem_is_busy()) {
        return -EBUSY;
    }

    if (!plug) {
        if (ram_block_discard_range(rb, offset, size)) {
            return -EBUSY;
        }
        virtio_mem_notify_unplug(vmem, offset, size);
    } else if (virtio_mem_notify_plug(vmem, offset, size)) {
        /* A mapping attempt may have resulted in memory getting populated. */
        ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
        return -EBUSY;
    }
    virtio_mem_set_bitmap(vmem, start_gpa, size, plug);
    return 0;
}
static int virtio_mem_state_change_request(VirtIOMEM *vmem, uint64_t gpa,
                                           uint16_t nb_blocks, bool plug)
{
    const uint64_t size = nb_blocks * vmem->block_size;
    int ret;

    if (!virtio_mem_valid_range(vmem, gpa, size)) {
        return VIRTIO_MEM_RESP_ERROR;
    }

    if (plug && (vmem->size + size > vmem->requested_size)) {
        return VIRTIO_MEM_RESP_NACK;
    }

    /* test if really all blocks are in the opposite state */
    if (!virtio_mem_test_bitmap(vmem, gpa, size, !plug)) {
        return VIRTIO_MEM_RESP_ERROR;
    }

    ret = virtio_mem_set_block_state(vmem, gpa, size, plug);
    if (ret) {
        return VIRTIO_MEM_RESP_BUSY;
    }
    if (plug) {
        vmem->size += size;
    } else {
        vmem->size -= size;
    }
    notifier_list_notify(&vmem->size_change_notifiers, &vmem->size);
    return VIRTIO_MEM_RESP_ACK;
}
static void virtio_mem_plug_request(VirtIOMEM *vmem, VirtQueueElement *elem,
                                    struct virtio_mem_req *req)
{
    const uint64_t gpa = le64_to_cpu(req->u.plug.addr);
    const uint16_t nb_blocks = le16_to_cpu(req->u.plug.nb_blocks);
    uint16_t type;

    trace_virtio_mem_plug_request(gpa, nb_blocks);
    type = virtio_mem_state_change_request(vmem, gpa, nb_blocks, true);
    virtio_mem_send_response_simple(vmem, elem, type);
}
static void virtio_mem_unplug_request(VirtIOMEM *vmem, VirtQueueElement *elem,
                                      struct virtio_mem_req *req)
{
    const uint64_t gpa = le64_to_cpu(req->u.unplug.addr);
    const uint16_t nb_blocks = le16_to_cpu(req->u.unplug.nb_blocks);
    uint16_t type;

    trace_virtio_mem_unplug_request(gpa, nb_blocks);
    type = virtio_mem_state_change_request(vmem, gpa, nb_blocks, false);
    virtio_mem_send_response_simple(vmem, elem, type);
}
static void virtio_mem_resize_usable_region(VirtIOMEM *vmem,
                                            uint64_t requested_size,
                                            bool can_shrink)
{
    uint64_t newsize = MIN(memory_region_size(&vmem->memdev->mr),
                           requested_size + VIRTIO_MEM_USABLE_EXTENT);

    /* The usable region size always has to be multiples of the block size. */
    newsize = QEMU_ALIGN_UP(newsize, vmem->block_size);

    if (!requested_size) {
        newsize = 0;
    }

    if (newsize < vmem->usable_region_size && !can_shrink) {
        return;
    }

    trace_virtio_mem_resized_usable_region(vmem->usable_region_size, newsize);
    vmem->usable_region_size = newsize;
}
static int virtio_mem_unplug_all(VirtIOMEM *vmem)
{
    RAMBlock *rb = vmem->memdev->mr.ram_block;

    if (virtio_mem_is_busy()) {
        return -EBUSY;
    }

    if (ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb))) {
        return -EBUSY;
    }
    virtio_mem_notify_unplug_all(vmem);

    bitmap_clear(vmem->bitmap, 0, vmem->bitmap_size);
    if (vmem->size) {
        vmem->size = 0;
        notifier_list_notify(&vmem->size_change_notifiers, &vmem->size);
    }
    trace_virtio_mem_unplugged_all();
    virtio_mem_resize_usable_region(vmem, vmem->requested_size, true);
    return 0;
}
static void virtio_mem_unplug_all_request(VirtIOMEM *vmem,
                                          VirtQueueElement *elem)
{
    trace_virtio_mem_unplug_all_request();
    if (virtio_mem_unplug_all(vmem)) {
        virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_BUSY);
    } else {
        virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_ACK);
    }
}
static void virtio_mem_state_request(VirtIOMEM *vmem, VirtQueueElement *elem,
                                     struct virtio_mem_req *req)
{
    const uint16_t nb_blocks = le16_to_cpu(req->u.state.nb_blocks);
    const uint64_t gpa = le64_to_cpu(req->u.state.addr);
    const uint64_t size = nb_blocks * vmem->block_size;
    struct virtio_mem_resp resp = {
        .type = cpu_to_le16(VIRTIO_MEM_RESP_ACK),
    };

    trace_virtio_mem_state_request(gpa, nb_blocks);
    if (!virtio_mem_valid_range(vmem, gpa, size)) {
        virtio_mem_send_response_simple(vmem, elem, VIRTIO_MEM_RESP_ERROR);
        return;
    }

    if (virtio_mem_test_bitmap(vmem, gpa, size, true)) {
        resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_PLUGGED);
    } else if (virtio_mem_test_bitmap(vmem, gpa, size, false)) {
        resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_UNPLUGGED);
    } else {
        resp.u.state.state = cpu_to_le16(VIRTIO_MEM_STATE_MIXED);
    }
    trace_virtio_mem_state_response(le16_to_cpu(resp.u.state.state));
    virtio_mem_send_response(vmem, elem, &resp);
}
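/*
 * Process all pending requests on the guest request virtqueue. Malformed
 * requests (truncated, unknown type, or without enough room for a response)
 * are treated as protocol violations that mark the device as broken.
 */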
static void virtio_mem_handle_request(VirtIODevice *vdev, VirtQueue *vq)
{
    const int len = sizeof(struct virtio_mem_req);
    VirtIOMEM *vmem = VIRTIO_MEM(vdev);
    VirtQueueElement *elem;
    struct virtio_mem_req req;
    uint16_t type;

    while (true) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_to_buf(elem->out_sg, elem->out_num, 0, &req, len) < len) {
            virtio_error(vdev, "virtio-mem protocol violation: invalid request"
                         " size: %d", len);
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) <
            sizeof(struct virtio_mem_resp)) {
            virtio_error(vdev, "virtio-mem protocol violation: not enough space"
                         " for response: %zu",
                         iov_size(elem->in_sg, elem->in_num));
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            return;
        }

        type = le16_to_cpu(req.type);
        switch (type) {
        case VIRTIO_MEM_REQ_PLUG:
            virtio_mem_plug_request(vmem, elem, &req);
            break;
        case VIRTIO_MEM_REQ_UNPLUG:
            virtio_mem_unplug_request(vmem, elem, &req);
            break;
        case VIRTIO_MEM_REQ_UNPLUG_ALL:
            virtio_mem_unplug_all_request(vmem, elem);
            break;
        case VIRTIO_MEM_REQ_STATE:
            virtio_mem_state_request(vmem, elem, &req);
            break;
        default:
            virtio_error(vdev, "virtio-mem protocol violation: unknown request"
                         " type: %d", type);
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            return;
        }

        g_free(elem);
    }
}
static void virtio_mem_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOMEM *vmem = VIRTIO_MEM(vdev);
    struct virtio_mem_config *config = (void *)config_data;

    config->block_size = cpu_to_le64(vmem->block_size);
    config->node_id = cpu_to_le16(vmem->node);
    config->requested_size = cpu_to_le64(vmem->requested_size);
    config->plugged_size = cpu_to_le64(vmem->size);
    config->addr = cpu_to_le64(vmem->addr);
    config->region_size = cpu_to_le64(memory_region_size(&vmem->memdev->mr));
    config->usable_region_size = cpu_to_le64(vmem->usable_region_size);
}
static uint64_t virtio_mem_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    if (ms->numa_state) {
#if defined(CONFIG_ACPI)
        virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM);
#endif
    }
    return features;
}
static void virtio_mem_system_reset(void *opaque)
{
    VirtIOMEM *vmem = VIRTIO_MEM(opaque);

    /*
     * During usual resets, we will unplug all memory and shrink the usable
     * region size. This is, however, not possible in all scenarios. Then,
     * the guest has to deal with this manually (VIRTIO_MEM_REQ_UNPLUG_ALL).
     */
    virtio_mem_unplug_all(vmem);
}
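/*
 * Realization, roughly: validate the memdev/node/block-size properties,
 * discard all backing memory so the device starts out fully unplugged,
 * allocate the tracking bitmap, set up the virtqueue, and register the
 * device as RamDiscardManager for the memdev's memory region.
 */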
static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    int nb_numa_nodes = ms->numa_state ? ms->numa_state->num_nodes : 0;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOMEM *vmem = VIRTIO_MEM(dev);
    uint64_t page_size;
    RAMBlock *rb;
    int ret;

    if (!vmem->memdev) {
        error_setg(errp, "'%s' property is not set", VIRTIO_MEM_MEMDEV_PROP);
        return;
    } else if (host_memory_backend_is_mapped(vmem->memdev)) {
        error_setg(errp, "'%s' property specifies a busy memdev: %s",
                   VIRTIO_MEM_MEMDEV_PROP,
                   object_get_canonical_path_component(OBJECT(vmem->memdev)));
        return;
    } else if (!memory_region_is_ram(&vmem->memdev->mr) ||
               memory_region_is_rom(&vmem->memdev->mr) ||
               !vmem->memdev->mr.ram_block) {
        error_setg(errp, "'%s' property specifies an unsupported memdev",
                   VIRTIO_MEM_MEMDEV_PROP);
        return;
    }

    if ((nb_numa_nodes && vmem->node >= nb_numa_nodes) ||
        (!nb_numa_nodes && vmem->node)) {
        error_setg(errp, "'%s' property has value '%" PRIu32 "', which exceeds"
                   " the number of numa nodes: %d", VIRTIO_MEM_NODE_PROP,
                   vmem->node, nb_numa_nodes ? nb_numa_nodes : 1);
        return;
    }

    if (enable_mlock) {
        error_setg(errp, "Incompatible with mlock");
        return;
    }

    rb = vmem->memdev->mr.ram_block;
    page_size = qemu_ram_pagesize(rb);

    /*
     * If the block size wasn't configured by the user, use a sane default. This
     * allows using hugetlbfs backends of any page size without manual
     * intervention.
     */
    if (!vmem->block_size) {
        vmem->block_size = virtio_mem_default_block_size(rb);
    }

    if (vmem->block_size < page_size) {
        error_setg(errp, "'%s' property has to be at least the page size (0x%"
                   PRIx64 ")", VIRTIO_MEM_BLOCK_SIZE_PROP, page_size);
        return;
    } else if (vmem->block_size < virtio_mem_default_block_size(rb)) {
        warn_report("'%s' property is smaller than the default block size (%"
                    PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP,
                    virtio_mem_default_block_size(rb) / MiB);
    } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) {
        error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64
                   ")", VIRTIO_MEM_REQUESTED_SIZE_PROP,
                   VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size);
        return;
    } else if (!QEMU_IS_ALIGNED(vmem->addr, vmem->block_size)) {
        error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64
                   ")", VIRTIO_MEM_ADDR_PROP, VIRTIO_MEM_BLOCK_SIZE_PROP,
                   vmem->block_size);
        return;
    } else if (!QEMU_IS_ALIGNED(memory_region_size(&vmem->memdev->mr),
                                vmem->block_size)) {
        error_setg(errp, "'%s' property memdev size has to be multiples of"
                   " '%s' (0x%" PRIx64 ")", VIRTIO_MEM_MEMDEV_PROP,
                   VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size);
        return;
    }

    if (ram_block_coordinated_discard_require(true)) {
        error_setg(errp, "Discarding RAM is disabled");
        return;
    }

    ret = ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));
    if (ret) {
        error_setg_errno(errp, -ret, "Unexpected error discarding RAM");
        ram_block_coordinated_discard_require(false);
        return;
    }

    virtio_mem_resize_usable_region(vmem, vmem->requested_size, true);

    vmem->bitmap_size = memory_region_size(&vmem->memdev->mr) /
                        vmem->block_size;
    vmem->bitmap = bitmap_new(vmem->bitmap_size);

    virtio_init(vdev, TYPE_VIRTIO_MEM, VIRTIO_ID_MEM,
                sizeof(struct virtio_mem_config));
    vmem->vq = virtio_add_queue(vdev, 128, virtio_mem_handle_request);

    host_memory_backend_set_mapped(vmem->memdev, true);
    vmstate_register_ram(&vmem->memdev->mr, DEVICE(vmem));
    qemu_register_reset(virtio_mem_system_reset, vmem);
    precopy_add_notifier(&vmem->precopy_notifier);

    /*
     * Set ourselves as RamDiscardManager before the plug handler maps the
     * memory region and exposes it via an address space.
     */
    memory_region_set_ram_discard_manager(&vmem->memdev->mr,
                                          RAM_DISCARD_MANAGER(vmem));
}
static void virtio_mem_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOMEM *vmem = VIRTIO_MEM(dev);

    /*
     * The unplug handler unmapped the memory region, so it cannot be
     * found via an address space anymore. Unset ourselves.
     */
    memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL);
    precopy_remove_notifier(&vmem->precopy_notifier);
    qemu_unregister_reset(virtio_mem_system_reset, vmem);
    vmstate_unregister_ram(&vmem->memdev->mr, DEVICE(vmem));
    host_memory_backend_set_mapped(vmem->memdev, false);
    virtio_del_queue(vdev, 0);
    virtio_cleanup(vdev);
    g_free(vmem->bitmap);
    ram_block_coordinated_discard_require(false);
}
static int virtio_mem_discard_range_cb(const VirtIOMEM *vmem, void *arg,
                                       uint64_t offset, uint64_t size)
{
    RAMBlock *rb = vmem->memdev->mr.ram_block;

    return ram_block_discard_range(rb, offset, size) ? -EINVAL : 0;
}

static int virtio_mem_restore_unplugged(VirtIOMEM *vmem)
{
    /* Make sure all memory is really discarded after migration. */
    return virtio_mem_for_each_unplugged_range(vmem, NULL,
                                               virtio_mem_discard_range_cb);
}
static int virtio_mem_post_load(void *opaque, int version_id)
{
    VirtIOMEM *vmem = VIRTIO_MEM(opaque);
    RamDiscardListener *rdl;
    int ret;

    /*
     * We started out with all memory discarded and our memory region is mapped
     * into an address space. Replay, now that we updated the bitmap.
     */
    QLIST_FOREACH(rdl, &vmem->rdl_list, next) {
        ret = virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
                                                  virtio_mem_notify_populate_cb);
        if (ret) {
            return ret;
        }
    }

    if (migration_in_incoming_postcopy()) {
        return 0;
    }

    return virtio_mem_restore_unplugged(vmem);
}
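/*
 * Migration sanity checks: a temporary struct (see VMSTATE_WITH_TMP below)
 * captures addr, region size, block size and node on the source, so the
 * destination can refuse to load the stream when these properties differ.
 */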
typedef struct VirtIOMEMMigSanityChecks {
    VirtIOMEM *parent;
    uint64_t addr;
    uint64_t region_size;
    uint64_t block_size;
    uint32_t node;
} VirtIOMEMMigSanityChecks;
static int virtio_mem_mig_sanity_checks_pre_save(void *opaque)
{
    VirtIOMEMMigSanityChecks *tmp = opaque;
    VirtIOMEM *vmem = tmp->parent;

    tmp->addr = vmem->addr;
    tmp->region_size = memory_region_size(&vmem->memdev->mr);
    tmp->block_size = vmem->block_size;
    tmp->node = vmem->node;
    return 0;
}
static int virtio_mem_mig_sanity_checks_post_load(void *opaque, int version_id)
{
    VirtIOMEMMigSanityChecks *tmp = opaque;
    VirtIOMEM *vmem = tmp->parent;
    const uint64_t new_region_size = memory_region_size(&vmem->memdev->mr);

    if (tmp->addr != vmem->addr) {
        error_report("Property '%s' changed from 0x%" PRIx64 " to 0x%" PRIx64,
                     VIRTIO_MEM_ADDR_PROP, tmp->addr, vmem->addr);
        return -EINVAL;
    }
    /*
     * Note: Preparation for resizeable memory regions. The maximum size
     * of the memory region must not change during migration.
     */
    if (tmp->region_size != new_region_size) {
        error_report("Property '%s' size changed from 0x%" PRIx64 " to 0x%"
                     PRIx64, VIRTIO_MEM_MEMDEV_PROP, tmp->region_size,
                     new_region_size);
        return -EINVAL;
    }
    if (tmp->block_size != vmem->block_size) {
        error_report("Property '%s' changed from 0x%" PRIx64 " to 0x%" PRIx64,
                     VIRTIO_MEM_BLOCK_SIZE_PROP, tmp->block_size,
                     vmem->block_size);
        return -EINVAL;
    }
    if (tmp->node != vmem->node) {
        error_report("Property '%s' changed from %" PRIu32 " to %" PRIu32,
                     VIRTIO_MEM_NODE_PROP, tmp->node, vmem->node);
        return -EINVAL;
    }
    return 0;
}
static const VMStateDescription vmstate_virtio_mem_sanity_checks = {
    .name = "virtio-mem-device/sanity-checks",
    .pre_save = virtio_mem_mig_sanity_checks_pre_save,
    .post_load = virtio_mem_mig_sanity_checks_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(addr, VirtIOMEMMigSanityChecks),
        VMSTATE_UINT64(region_size, VirtIOMEMMigSanityChecks),
        VMSTATE_UINT64(block_size, VirtIOMEMMigSanityChecks),
        VMSTATE_UINT32(node, VirtIOMEMMigSanityChecks),
        VMSTATE_END_OF_LIST(),
    },
};
static const VMStateDescription vmstate_virtio_mem_device = {
    .name = "virtio-mem-device",
    .minimum_version_id = 1,
    .version_id = 1,
    .priority = MIG_PRI_VIRTIO_MEM,
    .post_load = virtio_mem_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(VirtIOMEM, VirtIOMEMMigSanityChecks,
                         vmstate_virtio_mem_sanity_checks),
        VMSTATE_UINT64(usable_region_size, VirtIOMEM),
        VMSTATE_UINT64(size, VirtIOMEM),
        VMSTATE_UINT64(requested_size, VirtIOMEM),
        VMSTATE_BITMAP(bitmap, VirtIOMEM, 0, bitmap_size),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_mem = {
    .name = "virtio-mem",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
static void virtio_mem_fill_device_info(const VirtIOMEM *vmem,
                                        VirtioMEMDeviceInfo *vi)
{
    vi->memaddr = vmem->addr;
    vi->node = vmem->node;
    vi->requested_size = vmem->requested_size;
    vi->size = vmem->size;
    vi->max_size = memory_region_size(&vmem->memdev->mr);
    vi->block_size = vmem->block_size;
    vi->memdev = object_get_canonical_path(OBJECT(vmem->memdev));
}
static MemoryRegion *virtio_mem_get_memory_region(VirtIOMEM *vmem, Error **errp)
{
    if (!vmem->memdev) {
        error_setg(errp, "'%s' property must be set", VIRTIO_MEM_MEMDEV_PROP);
        return NULL;
    }

    return &vmem->memdev->mr;
}
static void virtio_mem_add_size_change_notifier(VirtIOMEM *vmem,
                                                Notifier *notifier)
{
    notifier_list_add(&vmem->size_change_notifiers, notifier);
}

static void virtio_mem_remove_size_change_notifier(VirtIOMEM *vmem,
                                                   Notifier *notifier)
{
    notifier_remove(notifier);
}
static void virtio_mem_get_size(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(obj);
    uint64_t value = vmem->size;

    visit_type_size(v, name, &value, errp);
}
static void virtio_mem_get_requested_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(obj);
    uint64_t value = vmem->requested_size;

    visit_type_size(v, name, &value, errp);
}
static void virtio_mem_set_requested_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    VirtIOMEM *vmem = VIRTIO_MEM(obj);
    Error *err = NULL;
    uint64_t value;

    visit_type_size(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /*
     * The block size and memory backend are not fixed until the device is
     * realized. realize() will verify these properties then.
     */
    if (DEVICE(obj)->realized) {
        if (!QEMU_IS_ALIGNED(value, vmem->block_size)) {
            error_setg(errp, "'%s' has to be multiples of '%s' (0x%" PRIx64
                       ")", name, VIRTIO_MEM_BLOCK_SIZE_PROP,
                       vmem->block_size);
            return;
        } else if (value > memory_region_size(&vmem->memdev->mr)) {
            error_setg(errp, "'%s' cannot exceed the memory backend size"
                       " (0x%" PRIx64 ")", name,
                       memory_region_size(&vmem->memdev->mr));
            return;
        }

        if (value != vmem->requested_size) {
            virtio_mem_resize_usable_region(vmem, value, false);
            vmem->requested_size = value;
        }
        /*
         * Trigger a config update so the guest gets notified. We trigger
         * even if the size didn't change (especially helpful for debugging).
         */
        virtio_notify_config(VIRTIO_DEVICE(vmem));
    } else {
        vmem->requested_size = value;
    }
}
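/*
 * This is the property behind guest-visible resizing, e.g. via
 * "qom-set <device> requested-size 1G" (the device id is just an example);
 * the guest then plugs or unplugs blocks until the plugged size matches.
 */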
static void virtio_mem_get_block_size(Object *obj, Visitor *v, const char *name,
                                      void *opaque, Error **errp)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(obj);
    uint64_t value = vmem->block_size;

    /*
     * If not configured by the user (and we're not realized yet), use the
     * default block size we would use with the current memory backend.
     */
    if (!value) {
        if (vmem->memdev && memory_region_is_ram(&vmem->memdev->mr)) {
            value = virtio_mem_default_block_size(vmem->memdev->mr.ram_block);
        } else {
            value = virtio_mem_thp_size();
        }
    }

    visit_type_size(v, name, &value, errp);
}
static void virtio_mem_set_block_size(Object *obj, Visitor *v, const char *name,
                                      void *opaque, Error **errp)
{
    VirtIOMEM *vmem = VIRTIO_MEM(obj);
    Error *err = NULL;
    uint64_t value;

    if (DEVICE(obj)->realized) {
        error_setg(errp, "'%s' cannot be changed", name);
        return;
    }

    visit_type_size(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < VIRTIO_MEM_MIN_BLOCK_SIZE) {
        error_setg(errp, "'%s' property has to be at least 0x%" PRIx32, name,
                   VIRTIO_MEM_MIN_BLOCK_SIZE);
        return;
    } else if (!is_power_of_2(value)) {
        error_setg(errp, "'%s' property has to be a power of two", name);
        return;
    }
    vmem->block_size = value;
}
static int virtio_mem_precopy_exclude_range_cb(const VirtIOMEM *vmem, void *arg,
                                               uint64_t offset, uint64_t size)
{
    void * const host = qemu_ram_get_host_addr(vmem->memdev->mr.ram_block);

    qemu_guest_free_page_hint(host + offset, size);
    return 0;
}

static void virtio_mem_precopy_exclude_unplugged(VirtIOMEM *vmem)
{
    virtio_mem_for_each_unplugged_range(vmem, NULL,
                                        virtio_mem_precopy_exclude_range_cb);
}
static int virtio_mem_precopy_notify(NotifierWithReturn *n, void *data)
{
    VirtIOMEM *vmem = container_of(n, VirtIOMEM, precopy_notifier);
    PrecopyNotifyData *pnd = data;

    switch (pnd->reason) {
    case PRECOPY_NOTIFY_AFTER_BITMAP_SYNC:
        virtio_mem_precopy_exclude_unplugged(vmem);
        break;
    default:
        break;
    }

    return 0;
}
static void virtio_mem_instance_init(Object *obj)
{
    VirtIOMEM *vmem = VIRTIO_MEM(obj);

    notifier_list_init(&vmem->size_change_notifiers);
    vmem->precopy_notifier.notify = virtio_mem_precopy_notify;
    QLIST_INIT(&vmem->rdl_list);

    object_property_add(obj, VIRTIO_MEM_SIZE_PROP, "size", virtio_mem_get_size,
                        NULL, NULL, NULL);
    object_property_add(obj, VIRTIO_MEM_REQUESTED_SIZE_PROP, "size",
                        virtio_mem_get_requested_size,
                        virtio_mem_set_requested_size, NULL, NULL);
    object_property_add(obj, VIRTIO_MEM_BLOCK_SIZE_PROP, "size",
                        virtio_mem_get_block_size, virtio_mem_set_block_size,
                        NULL, NULL);
}
static Property virtio_mem_properties[] = {
    DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0),
    DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0),
    DEFINE_PROP_LINK(VIRTIO_MEM_MEMDEV_PROP, VirtIOMEM, memdev,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_END_OF_LIST(),
};
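/*
 * RamDiscardManager interface implementation, backing the handlers wired up
 * in virtio_mem_class_init() below. It lets other subsystems (e.g., VFIO)
 * track which parts of the memory region are populated and get notified
 * about upcoming discards.
 */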
static uint64_t virtio_mem_rdm_get_min_granularity(const RamDiscardManager *rdm,
                                                   const MemoryRegion *mr)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(rdm);

    g_assert(mr == &vmem->memdev->mr);
    return vmem->block_size;
}
static bool virtio_mem_rdm_is_populated(const RamDiscardManager *rdm,
                                        const MemoryRegionSection *s)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(rdm);
    uint64_t start_gpa = vmem->addr + s->offset_within_region;
    uint64_t end_gpa = start_gpa + int128_get64(s->size);

    g_assert(s->mr == &vmem->memdev->mr);

    start_gpa = QEMU_ALIGN_DOWN(start_gpa, vmem->block_size);
    end_gpa = QEMU_ALIGN_UP(end_gpa, vmem->block_size);

    if (!virtio_mem_valid_range(vmem, start_gpa, end_gpa - start_gpa)) {
        return false;
    }

    return virtio_mem_test_bitmap(vmem, start_gpa, end_gpa - start_gpa, true);
}
struct VirtIOMEMReplayData {
    void *fn;
    void *opaque;
};

static int virtio_mem_rdm_replay_populated_cb(MemoryRegionSection *s, void *arg)
{
    struct VirtIOMEMReplayData *data = arg;

    return ((ReplayRamPopulate)data->fn)(s, data->opaque);
}
static int virtio_mem_rdm_replay_populated(const RamDiscardManager *rdm,
                                           MemoryRegionSection *s,
                                           ReplayRamPopulate replay_fn,
                                           void *opaque)
{
    const VirtIOMEM *vmem = VIRTIO_MEM(rdm);
    struct VirtIOMEMReplayData data = {
        .fn = replay_fn,
        .opaque = opaque,
    };

    g_assert(s->mr == &vmem->memdev->mr);
    return virtio_mem_for_each_plugged_section(vmem, s, &data,
                                               virtio_mem_rdm_replay_populated_cb);
}
static void virtio_mem_rdm_register_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl,
                                             MemoryRegionSection *s)
{
    VirtIOMEM *vmem = VIRTIO_MEM(rdm);
    int ret;

    g_assert(s->mr == &vmem->memdev->mr);
    rdl->section = memory_region_section_new_copy(s);

    QLIST_INSERT_HEAD(&vmem->rdl_list, rdl, next);
    ret = virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
                                              virtio_mem_notify_populate_cb);
    if (ret) {
        error_report("%s: Replaying plugged ranges failed: %s", __func__,
                     strerror(-ret));
    }
}
static void virtio_mem_rdm_unregister_listener(RamDiscardManager *rdm,
                                               RamDiscardListener *rdl)
{
    VirtIOMEM *vmem = VIRTIO_MEM(rdm);

    g_assert(rdl->section->mr == &vmem->memdev->mr);
    if (vmem->size) {
        if (rdl->double_discard_supported) {
            rdl->notify_discard(rdl, rdl->section);
        } else {
            virtio_mem_for_each_plugged_section(vmem, rdl->section, rdl,
                                                virtio_mem_notify_discard_cb);
        }
    }

    memory_region_section_free_copy(rdl->section);
    rdl->section = NULL;
    QLIST_REMOVE(rdl, next);
}
static void virtio_mem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOMEMClass *vmc = VIRTIO_MEM_CLASS(klass);
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_CLASS(klass);

    device_class_set_props(dc, virtio_mem_properties);
    dc->vmsd = &vmstate_virtio_mem;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_mem_device_realize;
    vdc->unrealize = virtio_mem_device_unrealize;
    vdc->get_config = virtio_mem_get_config;
    vdc->get_features = virtio_mem_get_features;
    vdc->vmsd = &vmstate_virtio_mem_device;

    vmc->fill_device_info = virtio_mem_fill_device_info;
    vmc->get_memory_region = virtio_mem_get_memory_region;
    vmc->add_size_change_notifier = virtio_mem_add_size_change_notifier;
    vmc->remove_size_change_notifier = virtio_mem_remove_size_change_notifier;

    rdmc->get_min_granularity = virtio_mem_rdm_get_min_granularity;
    rdmc->is_populated = virtio_mem_rdm_is_populated;
    rdmc->replay_populated = virtio_mem_rdm_replay_populated;
    rdmc->register_listener = virtio_mem_rdm_register_listener;
    rdmc->unregister_listener = virtio_mem_rdm_unregister_listener;
}
static const TypeInfo virtio_mem_info = {
    .name = TYPE_VIRTIO_MEM,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOMEM),
    .instance_init = virtio_mem_instance_init,
    .class_init = virtio_mem_class_init,
    .class_size = sizeof(VirtIOMEMClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_RAM_DISCARD_MANAGER },
        { }
    },
};
static void virtio_register_types(void)
{
    type_register_static(&virtio_mem_info);
}

type_init(virtio_register_types)