/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/madvise.h"
#include "qemu/main-loop.h"
#include "migration.h"
#include "migration-stats.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "sysemu/cpu-throttle.h"
#include "sysemu/runstate.h"
#include "sysemu/dirtylimit.h"
#include "sysemu/kvm.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */

#if defined(__linux__)
#include "qemu/userfaultfd.h"
#endif /* defined(__linux__) */
/***********************************************************/
/* ram save/restore */

/*
 * RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value, and renamed it to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 *
 * RAM_SAVE_FLAG_FULL was obsoleted in 2009.
 *
 * RAM_SAVE_FLAG_COMPRESS_PAGE (0x100) was removed in QEMU 9.1.
 */
#define RAM_SAVE_FLAG_FULL     0x01
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in rdma.h for RAM_SAVE_FLAG_HOOK */
#define RAM_SAVE_FLAG_MULTIFD_FLUSH    0x200
/* We can't use any flag that is bigger than 0x200 */
/*
 * mapped-ram migration supports O_DIRECT, so we need to make sure the
 * userspace buffer, the IO operation size and the file offset are
 * aligned according to the underlying device's block size.  The first
 * two are already aligned to page size, but we need to add padding to
 * the file to align the offset.  We cannot read the block size
 * dynamically because the migration file can be moved between
 * different systems, so use 1M to cover most block sizes and to keep
 * the file offset aligned at page size as well.
 */
#define MAPPED_RAM_FILE_OFFSET_ALIGNMENT 0x100000

/*
 * When doing mapped-ram migration, this is the amount we read from
 * the pages region in the migration file at a time.
 */
#define MAPPED_RAM_LOAD_BUF_SIZE 0x100000
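/*
 * Editor's illustrative example (assumption, not from the original source):
 * with the 1 MiB alignment above, a stream position that ends at, say,
 * offset 0x42000 would have the following pages region padded up to
 * ROUND_UP(0x42000, MAPPED_RAM_FILE_OFFSET_ALIGNMENT) == 0x100000, which is
 * also a multiple of the 4 KiB host/target page sizes commonly in use, so
 * O_DIRECT reads and writes stay block-aligned.
 */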
XBZRLECacheStats xbzrle_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* The migration channel used for a specific host page */
    QEMUFile *pss_channel;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Current block being searched */
    RAMBlock *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool complete_round;
    /* Whether we're sending a host page */
    bool host_page_sending;
    /* The start/end of current host page.  Invalid if host_page_sending==false */
    unsigned long host_page_start;
    unsigned long host_page_end;
};
typedef struct PageSearchStatus PageSearchStatus;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;
static void XBZRLE_cache_lock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}
/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from migrate_params_apply in the main
 * thread, possibly while a migration is in progress.  A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(uint64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}
static bool postcopy_preempt_active(void)
{
    return migrate_postcopy_preempt() && migration_in_postcopy();
}

bool migrate_ram_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block)
                                    && qemu_ram_is_named_file(block));
}

#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

void ramblock_recv_bitmap_set_offset(RAMBlock *rb, uint64_t byte_offset)
{
    set_bit_atomic(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}
#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->postcopy_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit before hand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required so that the bitmap can be parsed correctly even when
     * source and destination VMs are not using the same endianness.
     * (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    int ret = qemu_fflush(file);

    g_free(le_bitmap);

    if (ret < 0) {
        return ret;
    }

    return size + sizeof(size);
}
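/*
 * Editor's illustrative example (assumption, not from the original source):
 * for a block with nbits == 20 received-bitmap bits, DIV_ROUND_UP(20, 8) == 3
 * bytes, padded with ROUND_UP(3, 8) == 8, so the stream carries the be64
 * value 8, then 8 bytes of little-endian bitmap data, then the 8-byte
 * RAMBLOCK_RECV_BITMAP_ENDING marker as a sanity check for the receiver.
 */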
/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /*
     * PageSearchStatus structures for the channels when send pages.
     * Protected by the bitmap_mutex.
     */
    PageSearchStatus pss[RAM_CHANNEL_MAX];
    /* UFFD file descriptor, used in 'write-tracking' migration */
    int uffdio_fd;
    /* total ram size in bytes */
    uint64_t ram_bytes_total;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* Amount of xbzrle pages since the beginning of the period */
    uint64_t xbzrle_pages_prev;
    /* Amount of xbzrle encoded bytes since the beginning of the period */
    uint64_t xbzrle_bytes_prev;
    /* Are we really using XBZRLE (e.g., after the first round). */
    bool xbzrle_started;
    /* Are we on the last stage of migration */
    bool last_stage;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /*
     * Protects:
     * - dirty/clear bitmap
     * - migration_dirty_pages
     */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;

    /*
     * This is only used when postcopy is in recovery phase, to communicate
     * between the migration thread and the return path thread on dirty
     * bitmap synchronizations. This field is unused in other stages of
     * a migration.
     */
    unsigned int postcopy_bmap_sync_requested;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;
/* Whether postcopy has queued requests? */
static bool postcopy_has_request(RAMState *rs)
{
    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
}

void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd, errp);
}
uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

void ram_transferred_add(uint64_t bytes)
{
    if (runstate_is_running()) {
        stat64_add(&mig_stats.precopy_bytes, bytes);
    } else if (migration_in_postcopy()) {
        stat64_add(&mig_stats.postcopy_bytes, bytes);
    } else {
        stat64_add(&mig_stats.downtime_bytes, bytes);
    }
}
struct MigrationOps {
    int (*ram_save_target_page)(RAMState *rs, PageSearchStatus *pss);
};
typedef struct MigrationOps MigrationOps;

MigrationOps *migration_ops;

static int ram_save_host_page_urgent(PageSearchStatus *pss);

/* NOTE: page is the PFN not real ram_addr_t. */
static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
{
    pss->block = rb;
    pss->page = page;
    pss->complete_round = false;
}
/*
 * Check whether two PSSs are actively sending the same page.  Return true
 * if it is, false otherwise.
 */
static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
{
    return pss1->host_page_sending && pss2->host_page_sending &&
        (pss1->host_page_start == pss2->host_page_start);
}
/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @pss: current PSS channel status
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(PageSearchStatus *pss, QEMUFile *f,
                               RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;
    bool same_block = (block == pss->last_sent_block);

    if (same_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!same_block) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        pss->last_sent_block = block;
    }
    return size;
}
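/*
 * Editor's illustrative example (assumption, not from the original source):
 * for the second page of a block that was already announced, only the 8-byte
 * be64 word "offset | RAM_SAVE_FLAG_PAGE | RAM_SAVE_FLAG_CONTINUE" is
 * emitted; a page from a newly announced block is instead followed by the
 * idstr length byte and the idstr bytes, so that header costs
 * 8 + 1 + strlen(idstr) bytes on the wire.
 */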
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
                                    uint64_t bytes_dirty_threshold)
{
    uint64_t pct_initial = migrate_cpu_throttle_initial();
    uint64_t pct_increment = migrate_cpu_throttle_increment();
    bool pct_tailslow = migrate_cpu_throttle_tailslow();
    int pct_max = migrate_max_cpu_throttle();

    uint64_t throttle_now = cpu_throttle_get_percentage();
    uint64_t cpu_now, cpu_ideal, throttle_inc;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        if (!pct_tailslow) {
            throttle_inc = pct_increment;
        } else {
            /* Compute the ideal CPU percentage used by Guest, which may
             * make the dirty rate match the dirty rate threshold. */
            cpu_now = 100 - throttle_now;
            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                                   bytes_dirty_period);
            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
        }
        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
    }
}
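/*
 * Editor's worked example (assumption, not from the original source): with
 * tailslow enabled, throttle_now == 40 leaves cpu_now == 60; if the guest
 * dirtied twice what we can transfer (bytes_dirty_threshold /
 * bytes_dirty_period == 0.5), then cpu_ideal == 30 and the throttle grows by
 * MIN(60 - 30, pct_increment) instead of always by the fixed increment.
 */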
void mig_throttle_counter_reset(void)
{
    RAMState *rs = ram_state;

    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    rs->num_dirty_pages_period = 0;
    rs->bytes_xfer_prev = migration_transferred_bytes();
}
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 stat64_get(&mig_stats.dirty_sync_count));
}
#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_xbzrle_page(RAMState *rs, PageSearchStatus *pss,
                            uint8_t **current_data, ram_addr_t current_addr,
                            RAMBlock *block, ram_addr_t offset)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;
    QEMUFile *file = pss->pss_channel;
    uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);

    if (!cache_is_cached(XBZRLE.cache, current_addr, generation)) {
        xbzrle_counters.cache_miss++;
        if (!rs->last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             generation) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    /*
     * Reaching here means the page has hit the xbzrle cache, no matter what
     * encoding result it is (normal encoding, overflow or skipping the page),
     * count the page as encoded. This is used to calculate the encoding rate.
     *
     * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
     * 2nd page turns out to be skipped (i.e. no new bytes written to the
     * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
     * skipped page included. In this way, the encoding rate can tell if the
     * guest page is good for xbzrle encoding.
     */
    xbzrle_counters.pages++;
    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);

    /*
     * Update the cache contents, so that it corresponds to the data
     * sent, in all cases except where we skip the page.
     */
    if (!rs->last_stage && encoded_len != 0) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
        /*
         * In the case where we couldn't compress, ensure that the caller
         * sends the data from the cache, since the guest might have
         * changed the RAM since we copied it.
         */
        *current_data = prev_cached_page;
    }

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        xbzrle_counters.bytes += TARGET_PAGE_SIZE;
        return -1;
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(pss, pss->pss_channel, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(file, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(file, encoded_len);
    qemu_put_buffer(file, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    /*
     * The xbzrle encoded bytes don't count the 8 byte header with
     * RAM_SAVE_FLAG_CONTINUE.
     */
    xbzrle_counters.bytes += bytes_xbzrle - 8;
    ram_transferred_add(bytes_xbzrle);

    return 1;
}
/**
 * pss_find_next_dirty: find the next dirty page of current ramblock
 *
 * This function updates pss->page to point to the next dirty page index
 * within the ramblock to migrate, or the end of ramblock when nothing
 * found.  Note that when pss->host_page_sending==true it means we're
 * in the middle of sending a host page, so we won't look for a dirty
 * page that is outside the host page boundary.
 *
 * @pss: the current page search status
 */
static void pss_find_next_dirty(PageSearchStatus *pss)
{
    RAMBlock *rb = pss->block;
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;

    if (migrate_ram_is_ignored(rb)) {
        /* Points directly to the end, so we know no dirty page */
        pss->page = size;
        return;
    }

    /*
     * If during sending a host page, only look for dirty pages within the
     * current host page being sent.
     */
    if (pss->host_page_sending) {
        assert(pss->host_page_end);
        size = MIN(size, pss->host_page_end);
    }

    pss->page = find_next_bit(bitmap, size, pss->page);
}
static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
                                                       unsigned long page)
{
    uint8_t shift;
    hwaddr size, start;

    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
        return;
    }

    shift = rb->clear_bmap_shift;
    /*
     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
     * can make things easier sometimes since then start address
     * of the small chunk will always be 64 pages aligned so the
     * bitmap will always be aligned to unsigned long.  We should
     * even be able to remove this restriction but I'm simply
     * keeping it.
     */
    assert(shift >= 6);

    size = 1ULL << (TARGET_PAGE_BITS + shift);
    start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size);
    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
    memory_region_clear_dirty_bitmap(rb->mr, start, size);
}
static void
migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
                                                 unsigned long start,
                                                 unsigned long npages)
{
    unsigned long i, chunk_pages = 1UL << rb->clear_bmap_shift;
    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);

    /*
     * Clear pages from start to start + npages - 1, so the end boundary is
     * exclusive.
     */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rb, i);
    }
}
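/*
 * Editor's worked example (assumption, not from the original source): with
 * clear_bmap_shift == 18, each chunk covers 2^18 target pages; clearing a
 * range that starts at page 300000 and spans 10 pages aligns down to chunk
 * 262144 and up to 524288, so exactly one chunk-sized
 * memory_region_clear_dirty_bitmap() call is issued for the whole range.
 */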
/*
 * colo_bitmap_find_dirty: find contiguous dirty pages from start
 *
 * Returns the page offset within memory region of the start of the contiguous
 * dirty pages
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 * @num: the number of contiguous dirty pages
 */
static inline
unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                     unsigned long start, unsigned long *num)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long first, next;

    *num = 0;

    if (migrate_ram_is_ignored(rb)) {
        return size;
    }

    first = find_next_bit(bitmap, size, start);
    if (first >= size) {
        return first;
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;
    return first;
}
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    /*
     * Clear dirty bitmap if needed.  This _must_ be called before we
     * send any of the page in the chunk because we need to make sure
     * we can capture further page content changes when we sync dirty
     * log the next time.  So as long as we are going to send any of
     * the page in the chunk we clear the remote dirty bitmap for all.
     * Clearing it earlier won't be a problem, but too late will.
     */
    migration_clear_memory_region_dirty_bitmap(rb, page);

    ret = test_and_clear_bit(page, rb->bmap);
    if (ret) {
        rs->migration_dirty_pages--;
    }

    return ret;
}
static void dirty_bitmap_clear_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr offset = section->offset_within_region;
    const hwaddr size = int128_get64(section->size);
    const unsigned long start = offset >> TARGET_PAGE_BITS;
    const unsigned long npages = size >> TARGET_PAGE_BITS;
    RAMBlock *rb = section->mr->ram_block;
    uint64_t *cleared_bits = opaque;

    /*
     * We don't grab ram_state->bitmap_mutex because we expect to run
     * only when starting migration or during postcopy recovery where
     * we don't have concurrent access.
     */
    if (!migration_in_postcopy() && !migrate_background_snapshot()) {
        migration_clear_memory_region_dirty_bitmap_range(rb, start, npages);
    }
    *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages);
    bitmap_clear(rb->bmap, start, npages);
}
/*
 * Exclude all dirty pages from migration that fall into a discarded range as
 * managed by a RamDiscardManager responsible for the mapped memory region of
 * the RAMBlock.  Clear the corresponding bits in the dirty bitmaps.
 *
 * Discarded pages ("logically unplugged") have undefined content and must
 * not get migrated, because even reading these pages for migration might
 * result in undesired behavior.
 *
 * Returns the number of cleared bits in the RAMBlock dirty bitmap.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb)
{
    uint64_t cleared_bits = 0;

    if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = int128_make64(qemu_ram_get_used_length(rb)),
        };

        ram_discard_manager_replay_discarded(rdm, &section,
                                             dirty_bitmap_clear_section,
                                             &cleared_bits);
    }
    return cleared_bits;
}
/*
 * Check if a host-page aligned page falls into a discarded range as managed by
 * a RamDiscardManager responsible for the mapped memory region of the RAMBlock.
 *
 * Note: The result is only stable while migrating (precopy/postcopy).
 */
bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
{
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = start,
            .size = int128_make64(qemu_ram_pagesize(rb)),
        };

        return !ram_discard_manager_is_populated(rdm, &section);
    }
    return false;
}
/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
    uint64_t new_dirty_pages =
        cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);

    rs->migration_dirty_pages += new_dirty_pages;
    rs->num_dirty_pages_period += new_dirty_pages;
}
/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size.  If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        summary |= block->page_size;
    }

    return summary;
}

uint64_t ram_get_total_transferred_pages(void)
{
    return stat64_get(&mig_stats.normal_pages) +
        stat64_get(&mig_stats.zero_pages) +
        xbzrle_counters.pages;
}
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;

    /* calculate period counters */
    stat64_set(&mig_stats.dirty_pages_rate,
               rs->num_dirty_pages_period * 1000 /
               (end_time - rs->time_last_bitmap_sync));

    if (!page_count) {
        return;
    }

    if (migrate_xbzrle()) {
        double encoded_size, unencoded_size;

        xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
            rs->xbzrle_cache_miss_prev) / page_count;
        rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
        unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
                         TARGET_PAGE_SIZE;
        encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
        if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
            xbzrle_counters.encoding_rate = 0;
        } else {
            xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
        }
        rs->xbzrle_pages_prev = xbzrle_counters.pages;
        rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
    }
}
/*
 * Enable dirty-limit to throttle down the guest
 */
static void migration_dirty_limit_guest(void)
{
    /*
     * dirty page rate quota for all vCPUs fetched from
     * migration parameter 'vcpu_dirty_limit'
     */
    static int64_t quota_dirtyrate;
    MigrationState *s = migrate_get_current();

    /*
     * If dirty limit already enabled and migration parameter
     * vcpu-dirty-limit untouched.
     */
    if (dirtylimit_in_service() &&
        quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
        return;
    }

    quota_dirtyrate = s->parameters.vcpu_dirty_limit;

    /*
     * Set a quota dirty rate for all vCPUs; note that the second
     * parameter is ignored when setting the limit for the whole VM.
     */
    qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
    trace_migration_dirty_limit_guest(quota_dirtyrate);
}
static void migration_trigger_throttle(RAMState *rs)
{
    uint64_t threshold = migrate_throttle_trigger_threshold();
    uint64_t bytes_xfer_period =
        migration_transferred_bytes() - rs->bytes_xfer_prev;
    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;

    /*
     * The following detection logic can be refined later.  For now:
     * Check to see if the ratio between dirtied bytes and the approx.
     * amount of bytes that just got transferred since the last time
     * we were in this routine reaches the threshold.  If that happens
     * twice, start or increase throttling.
     */
    if ((bytes_dirty_period > bytes_dirty_threshold) &&
        (++rs->dirty_rate_high_cnt >= 2)) {
        rs->dirty_rate_high_cnt = 0;
        if (migrate_auto_converge()) {
            trace_migration_throttle();
            mig_throttle_guest_down(bytes_dirty_period,
                                    bytes_dirty_threshold);
        } else if (migrate_dirty_limit()) {
            migration_dirty_limit_guest();
        }
    }
}
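/*
 * Editor's worked example (assumption, not from the original source): with a
 * throttle-trigger-threshold of 50, transferring 1 GiB during a sync period
 * gives bytes_dirty_threshold == 512 MiB; if the guest dirtied more than
 * that in two consecutive periods, auto-converge (or dirty-limit) kicks in.
 */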
static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
    RAMBlock *block;
    int64_t end_time;

    stat64_add(&mig_stats.dirty_sync_count, 1);

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync(last_stage);

    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
        WITH_RCU_READ_LOCK_GUARD() {
            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
                ramblock_sync_dirty_bitmap(rs, block);
            }
            stat64_set(&mig_stats.dirty_bytes_last_sync, ram_bytes_remaining());
        }
    }

    memory_global_after_dirty_log_sync();
    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        migration_trigger_throttle(rs);

        migration_update_rates(rs, end_time);

        rs->target_page_count_prev = rs->target_page_count;

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = migration_transferred_bytes();
    }
    if (migrate_events()) {
        uint64_t generation = stat64_get(&mig_stats.dirty_sync_count);
        qapi_event_send_migration_pass(generation);
    }
}
static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
    Error *local_err = NULL;

    /*
     * The current notifier usage is just an optimization to migration, so we
     * don't stop the normal migration process in the error case.
     */
    if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
        local_err = NULL;
    }

    migration_bitmap_sync(rs, last_stage);

    if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
        error_report_err(local_err);
    }
}
void ram_release_page(const char *rbname, uint64_t offset)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
}
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @pss: current PSS channel
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
                          ram_addr_t offset)
{
    uint8_t *p = pss->block->host + offset;
    QEMUFile *file = pss->pss_channel;
    int len = 0;

    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) {
        return 0;
    }

    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
        return 0;
    }

    stat64_add(&mig_stats.zero_pages, 1);

    if (migrate_mapped_ram()) {
        /* zero pages are not transferred with mapped-ram */
        clear_bit_atomic(offset >> TARGET_PAGE_BITS, pss->block->file_bmap);
        return 1;
    }

    len += save_page_header(pss, file, pss->block, offset | RAM_SAVE_FLAG_ZERO);
    qemu_put_byte(file, 0);
    len += 1;
    ram_release_page(pss->block->idstr, offset);
    ram_transferred_add(len);

    /*
     * Must let xbzrle know, otherwise a previous (now 0'd) cached
     * page would be stale.
     */
    if (rs->xbzrle_started) {
        XBZRLE_cache_lock();
        xbzrle_cache_zero_page(pss->block->offset + offset);
        XBZRLE_cache_unlock();
    }

    return 1;
}
/*
 * @pages: the number of pages written by the control path,
 *        < 0 - error
 *        > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(PageSearchStatus *pss,
                              ram_addr_t offset, int *pages)
{
    int ret;

    ret = rdma_control_save_page(pss->pss_channel, pss->block->offset, offset,
                                 TARGET_PAGE_SIZE);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        *pages = 1;
        return true;
    }
    *pages = ret;
    return true;
}
/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @pss: current PSS channel
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(PageSearchStatus *pss, RAMBlock *block,
                            ram_addr_t offset, uint8_t *buf, bool async)
{
    QEMUFile *file = pss->pss_channel;

    if (migrate_mapped_ram()) {
        qemu_put_buffer_at(file, buf, TARGET_PAGE_SIZE,
                           block->pages_offset + offset);
        set_bit(offset >> TARGET_PAGE_BITS, block->file_bmap);
    } else {
        ram_transferred_add(save_page_header(pss, pss->pss_channel, block,
                                             offset | RAM_SAVE_FLAG_PAGE));
        if (async) {
            qemu_put_buffer_async(file, buf, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &&
                                  migration_in_postcopy());
        } else {
            qemu_put_buffer(file, buf, TARGET_PAGE_SIZE);
        }
    }
    ram_transferred_add(TARGET_PAGE_SIZE);
    stat64_add(&mig_stats.normal_pages, 1);
    return 1;
}
/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
{
    int pages = -1;
    uint8_t *p;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    ram_addr_t current_addr = block->offset + offset;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    XBZRLE_cache_lock();
    if (rs->xbzrle_started && !migration_in_postcopy()) {
        pages = save_xbzrle_page(rs, pss, &p, current_addr,
                                 block, offset);
        if (!rs->last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        pages = save_normal_page(pss, block, offset, p, send_async);
    }

    XBZRLE_cache_unlock();

    return pages;
}
static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
{
    if (!multifd_queue_page(block, offset)) {
        return -1;
    }

    return 1;
}

#define PAGE_ALL_CLEAN 0
#define PAGE_TRY_AGAIN 1
#define PAGE_DIRTY_FOUND 2
/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns:
 *         <0: An error happened
 *         PAGE_ALL_CLEAN: no dirty page found, give up
 *         PAGE_TRY_AGAIN: no dirty page found, retry for next block
 *         PAGE_DIRTY_FOUND: dirty page found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
{
    /* Update pss->page for the next dirty bit in ramblock */
    pss_find_next_dirty(pss);

    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        return PAGE_ALL_CLEAN;
    }
    if (!offset_in_ramblock(pss->block,
                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            if (migrate_multifd() &&
                (!migrate_multifd_flush_after_each_section() ||
                 migrate_mapped_ram())) {
                QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
                int ret = multifd_ram_flush_and_sync();
                if (ret < 0) {
                    return ret;
                }

                if (!migrate_mapped_ram()) {
                    qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
                }
            }

            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            /* After the first round, enable XBZRLE. */
            if (migrate_xbzrle()) {
                rs->xbzrle_started = true;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        return PAGE_TRY_AGAIN;
    } else {
        /* We've found something */
        return PAGE_DIRTY_FOUND;
    }
}
/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @rs: current RAM state
 * @offset: used to return the offset within the RAMBlock
 */
static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
{
    struct RAMSrcPageRequest *entry;
    RAMBlock *block = NULL;

    if (!postcopy_has_request(rs)) {
        return NULL;
    }

    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);

    /*
     * This should _never_ change even after we take the lock, because no one
     * should be taking anything off the request list other than us.
     */
    assert(postcopy_has_request(rs));

    entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
    block = entry->rb;
    *offset = entry->offset;

    if (entry->len > TARGET_PAGE_SIZE) {
        entry->len -= TARGET_PAGE_SIZE;
        entry->offset += TARGET_PAGE_SIZE;
    } else {
        memory_region_unref(block->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(entry);
        migration_consume_urgent_request();
    }

    return block;
}
#if defined(__linux__)
/**
 * poll_fault_page: try to get next UFFD write fault page and, if pending fault
 *   is found, return RAM block pointer and page offset
 *
 * Returns pointer to the RAMBlock containing faulting page,
 *   NULL if no write faults are pending
 *
 * @rs: current RAM state
 * @offset: page offset from the beginning of the block
 */
static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    struct uffd_msg uffd_msg;
    void *page_address;
    RAMBlock *block;
    int res;

    if (!migrate_background_snapshot()) {
        return NULL;
    }

    res = uffd_read_events(rs->uffdio_fd, &uffd_msg, 1);
    if (res <= 0) {
        return NULL;
    }

    page_address = (void *)(uintptr_t) uffd_msg.arg.pagefault.address;
    block = qemu_ram_block_from_host(page_address, false, offset);
    assert(block && (block->flags & RAM_UF_WRITEPROTECT) != 0);
    return block;
}
/**
 * ram_save_release_protection: release UFFD write protection after
 *   a range of pages has been saved
 *
 * @rs: current RAM state
 * @pss: page-search-status structure
 * @start_page: index of the first page in the range relative to pss->block
 *
 * Returns 0 on success, negative value in case of an error
 */
static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    int res = 0;

    /* Check if page is from UFFD-managed region. */
    if (pss->block->flags & RAM_UF_WRITEPROTECT) {
        void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
        uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;

        /* Flush async buffers before un-protect. */
        qemu_fflush(pss->pss_channel);
        /* Un-protect memory range. */
        res = uffd_change_protection(rs->uffdio_fd, page_address, run_length,
                                     false, false);
    }

    return res;
}
/*
 * ram_write_tracking_available: check if kernel supports required UFFD features
 *
 * Returns true if supports, false otherwise
 */
bool ram_write_tracking_available(void)
{
    uint64_t uffd_features;
    int res;

    res = uffd_query_features(&uffd_features);
    return (res == 0 &&
            (uffd_features & UFFD_FEATURE_PAGEFAULT_FLAG_WP) != 0);
}

/*
 * ram_write_tracking_compatible: check if guest configuration is
 *   compatible with 'write-tracking'
 *
 * Returns true if compatible, false otherwise
 */
bool ram_write_tracking_compatible(void)
{
    const uint64_t uffd_ioctls_mask = BIT(_UFFDIO_WRITEPROTECT);
    int uffd_fd;
    RAMBlock *block;
    bool ret = false;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, false);
    if (uffd_fd < 0) {
        return false;
    }

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        uint64_t uffd_ioctls;

        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }
        /* Try to register block memory via UFFD-IO to track writes */
        if (uffd_register_memory(uffd_fd, block->host, block->max_length,
                                 UFFDIO_REGISTER_MODE_WP, &uffd_ioctls)) {
            goto out;
        }
        if ((uffd_ioctls & uffd_ioctls_mask) != uffd_ioctls_mask) {
            goto out;
        }
    }
    ret = true;

out:
    uffd_close_fd(uffd_fd);
    return ret;
}
static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
                                       ram_addr_t size)
{
    const ram_addr_t end = offset + size;

    /*
     * We read one byte of each page; this will preallocate page tables if
     * required and populate the shared zeropage on MAP_PRIVATE anonymous memory
     * where no page was populated yet. This might require adaption when
     * supporting other mappings, like shmem.
     */
    for (; offset < end; offset += block->page_size) {
        char tmp = *((char *)block->host + offset);

        /* Don't optimize the read out */
        asm volatile("" : "+r" (tmp));
    }
}

static inline int populate_read_section(MemoryRegionSection *section,
                                        void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    hwaddr offset = section->offset_within_region;
    RAMBlock *block = section->mr->ram_block;

    populate_read_range(block, offset, size);
    return 0;
}
/*
 * ram_block_populate_read: preallocate page tables and populate pages in the
 *   RAM block by reading a byte of each page.
 *
 * Since it's solely used for userfault_fd WP feature, here we just
 *   hardcode page size to qemu_real_host_page_size.
 *
 * @rb: RAM block to populate
 */
static void ram_block_populate_read(RAMBlock *rb)
{
    /*
     * Skip populating all pages that fall into a discarded range as managed by
     * a RamDiscardManager responsible for the mapped memory region of the
     * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock
     * must not get populated automatically. We don't have to track
     * modifications via userfaultfd WP reliably, because these pages will
     * not be part of the migration stream either way -- see
     * ramblock_dirty_bitmap_exclude_discarded_pages().
     *
     * Note: The result is only stable while migrating (precopy/postcopy).
     */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        ram_discard_manager_replay_populated(rdm, &section,
                                             populate_read_section, NULL);
    } else {
        populate_read_range(rb, 0, rb->used_length);
    }
}

/*
 * ram_write_tracking_prepare: prepare for UFFD-WP memory tracking
 */
void ram_write_tracking_prepare(void)
{
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /*
         * Populate pages of the RAM block before enabling userfault_fd
         * write protection.
         *
         * This stage is required since ioctl(UFFDIO_WRITEPROTECT) with
         * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip
         * pages with pte_none() entries in page table.
         */
        ram_block_populate_read(block);
    }
}
static inline int uffd_protect_section(MemoryRegionSection *section,
                                       void *opaque)
{
    const hwaddr size = int128_get64(section->size);
    const hwaddr offset = section->offset_within_region;
    RAMBlock *rb = section->mr->ram_block;
    int uffd_fd = (uintptr_t)opaque;

    return uffd_change_protection(uffd_fd, rb->host + offset, size, true,
                                  false);
}

static int ram_block_uffd_protect(RAMBlock *rb, int uffd_fd)
{
    assert(rb->flags & RAM_UF_WRITEPROTECT);

    /* See ram_block_populate_read() */
    if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr);
        MemoryRegionSection section = {
            .mr = rb->mr,
            .offset_within_region = 0,
            .size = rb->mr->size,
        };

        return ram_discard_manager_replay_populated(rdm, &section,
                                                    uffd_protect_section,
                                                    (void *)(uintptr_t)uffd_fd);
    }
    return uffd_change_protection(uffd_fd, rb->host,
                                  rb->used_length, true, false);
}
/*
 * ram_write_tracking_start: start UFFD-WP memory tracking
 *
 * Returns 0 for success or negative value in case of error
 */
int ram_write_tracking_start(void)
{
    int uffd_fd;
    RAMState *rs = ram_state;
    RAMBlock *block;

    /* Open UFFD file descriptor */
    uffd_fd = uffd_create_fd(UFFD_FEATURE_PAGEFAULT_FLAG_WP, true);
    if (uffd_fd < 0) {
        return uffd_fd;
    }
    rs->uffdio_fd = uffd_fd;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        /* Nothing to do with read-only and MMIO-writable regions */
        if (block->mr->readonly || block->mr->rom_device) {
            continue;
        }

        /* Register block memory with UFFD to track writes */
        if (uffd_register_memory(rs->uffdio_fd, block->host,
                                 block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
            goto error;
        }
        block->flags |= RAM_UF_WRITEPROTECT;
        memory_region_ref(block->mr);

        /* Apply UFFD write protection to the block memory range */
        if (ram_block_uffd_protect(block, uffd_fd)) {
            goto error;
        }

        trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
                                                block->host, block->max_length);
    }

    return 0;

error:
    error_report("ram_write_tracking_start() failed: restoring initial memory state");

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);
        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    uffd_close_fd(uffd_fd);
    rs->uffdio_fd = -1;
    return -1;
}
/*
 * ram_write_tracking_stop: stop UFFD-WP memory tracking and remove protection
 */
void ram_write_tracking_stop(void)
{
    RAMState *rs = ram_state;
    RAMBlock *block;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        if ((block->flags & RAM_UF_WRITEPROTECT) == 0) {
            continue;
        }
        uffd_unregister_memory(rs->uffdio_fd, block->host, block->max_length);

        trace_ram_write_tracking_ramblock_stop(block->idstr, block->page_size,
                                               block->host, block->max_length);

        /* Cleanup flags and remove reference */
        block->flags &= ~RAM_UF_WRITEPROTECT;
        memory_region_unref(block->mr);
    }

    /* Finally close UFFD file descriptor */
    uffd_close_fd(rs->uffdio_fd);
    rs->uffdio_fd = -1;
}
#else
/* No target OS support, stubs just fail or ignore */

static RAMBlock *poll_fault_page(RAMState *rs, ram_addr_t *offset)
{
    (void) rs;
    (void) offset;

    return NULL;
}

static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
                                       unsigned long start_page)
{
    (void) rs;
    (void) pss;
    (void) start_page;

    return 0;
}

bool ram_write_tracking_available(void)
{
    return false;
}

bool ram_write_tracking_compatible(void)
{
    g_assert_not_reached();
}

int ram_write_tracking_start(void)
{
    g_assert_not_reached();
}

void ram_write_tracking_stop(void)
{
    g_assert_not_reached();
}
#endif /* defined(__linux__) */
/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns true if a queued page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 */
static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty = false;

    do {
        block = unqueue_page(rs, &offset);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long page;

            page = offset >> TARGET_PAGE_BITS;
            dirty = test_bit(page, block->bmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
                                                page);
            } else {
                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
            }
        }

    } while (block && !dirty);

    if (!block) {
        /*
         * Poll write faults too if background snapshot is enabled; that's
         * when we have vCPUs blocked by write-protected pages.
         */
        block = poll_fault_page(rs, &offset);
    }

    if (block) {
        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->page = offset >> TARGET_PAGE_BITS;

        /*
         * This unqueued page would break the "one round" check, even if it
         * is really rare.
         */
        pss->complete_round = false;
    }

    return !!block;
}
/**
 * migration_page_queue_free: drop any remaining pages in the ram
 * request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left.  In case that there is any page left, we drop it.
 */
static void migration_page_queue_free(RAMState *rs)
{
    struct RAMSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    RCU_READ_LOCK_GUARD();
    QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
        g_free(mspr);
    }
}
/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @rbname: Name of the RAMBLock of the request. NULL means the
 *          same that last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len,
                         Error **errp)
{
    RAMBlock *ramblock;
    RAMState *rs = ram_state;

    stat64_add(&mig_stats.postcopy_requests, 1);
    RCU_READ_LOCK_GUARD();

    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = rs->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no previous block");
            return -1;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_setg(errp, "MIG_RP_MSG_REQ_PAGES has no block '%s'", rbname);
            return -1;
        }
        rs->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (!offset_in_ramblock(ramblock, start + len - 1)) {
        error_setg(errp, "MIG_RP_MSG_REQ_PAGES request overrun, "
                   "start=" RAM_ADDR_FMT " len="
                   RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                   start, len, ramblock->used_length);
        return -1;
    }

    /*
     * When with postcopy preempt, we send back the page directly in the
     * rp-return thread.
     */
    if (postcopy_preempt_active()) {
        ram_addr_t page_start = start >> TARGET_PAGE_BITS;
        size_t page_size = qemu_ram_pagesize(ramblock);
        PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
        int ret = 0;

        qemu_mutex_lock(&rs->bitmap_mutex);

        pss_init(pss, ramblock, page_start);
        /*
         * Always use the preempt channel, and make sure it's there.  It's
         * safe to access without lock, because when rp-thread is running
         * we should be the only one who operates on the qemufile
         */
        pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
        assert(pss->pss_channel);

        /*
         * It must be either one or multiple of host page size.  Just
         * assert; if something wrong we're mostly split brain anyway.
         */
        assert(len % page_size == 0);
        while (len) {
            if (ram_save_host_page_urgent(pss)) {
                error_setg(errp, "ram_save_host_page_urgent() failed: "
                           "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
                           ramblock->idstr, start);
                ret = -1;
                break;
            }
            /*
             * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
             * will automatically be moved and point to the next host page
             * we're going to send, so no need to update here.
             *
             * Normally QEMU never sends >1 host page in requests, so
             * logically we don't even need that as the loop should only
             * run once, but just to be consistent.
             */
            start += page_size;
            len -= page_size;
        }
        qemu_mutex_unlock(&rs->bitmap_mutex);

        return ret;
    }

    struct RAMSrcPageRequest *new_entry =
        g_new0(struct RAMSrcPageRequest, 1);
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&rs->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
    migration_make_urgent_request();
    qemu_mutex_unlock(&rs->src_page_req_mutex);

    return 0;
}
/**
 * ram_save_target_page_legacy: save one target page
 *
 * Returns the number of pages written
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
{
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
    int res;

    if (control_save_page(pss, offset, &res)) {
        return res;
    }

    if (save_zero_page(rs, pss, offset)) {
        return 1;
    }

    return ram_save_page(rs, pss);
}

/**
 * ram_save_target_page_multifd: send one target page to multifd workers
 *
 * Returns 1 if the page was queued, -1 otherwise.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
{
    RAMBlock *block = pss->block;
    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    /*
     * While using multifd live migration, we still need to handle zero
     * page checking on the migration main thread.
     */
    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
        if (save_zero_page(rs, pss, offset)) {
            return 1;
        }
    }

    return ram_save_multifd_page(block, offset);
}
/* Should be called before sending a host page */
static void pss_host_page_prepare(PageSearchStatus *pss)
{
    /* How many guest pages are there in one host page? */
    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;

    pss->host_page_sending = true;
    if (guest_pfns <= 1) {
        /*
         * This covers both when guest psize == host psize, or when guest
         * has larger psize than the host (guest_pfns==0).
         *
         * For the latter, we always send one whole guest page per
         * iteration of the host page (example: an Alpha VM on x86 host
         * will have guest psize 8K while host psize 4K).
         */
        pss->host_page_start = pss->page;
        pss->host_page_end = pss->page + 1;
    } else {
        /*
         * The host page spans over multiple guest pages, we send them
         * within the same host page iteration.
         */
        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
    }
}
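/*
 * Editor's worked example (assumption, not from the original source): for a
 * RAMBlock backed by 2 MiB hugepages with 4 KiB target pages, guest_pfns is
 * 512; a dirty page at index 1000 yields host_page_start == 512 and
 * host_page_end == 1024, so the whole hugepage is covered by one host-page
 * iteration.
 */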
/*
 * Whether the page pointed by PSS is within the host page being sent.
 * Must be called after a previous pss_host_page_prepare().
 */
static bool pss_within_range(PageSearchStatus *pss)
{
    ram_addr_t ram_addr;

    assert(pss->host_page_sending);

    /* Over host-page boundary? */
    if (pss->page >= pss->host_page_end) {
        return false;
    }

    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;

    return offset_in_ramblock(pss->block, ram_addr);
}

static void pss_host_page_finish(PageSearchStatus *pss)
{
    pss->host_page_sending = false;
    /* This is not needed, but just to reset it */
    pss->host_page_start = pss->host_page_end = 0;
}
/*
 * Send an urgent host page specified by `pss'.  Need to be called with
 * bitmap_mutex held.
 *
 * Returns 0 if saving the host page succeeded, negative otherwise.
 */
static int ram_save_host_page_urgent(PageSearchStatus *pss)
{
    bool page_dirty, sent = false;
    RAMState *rs = ram_state;
    int ret = 0;

    trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
    pss_host_page_prepare(pss);

    /*
     * If precopy is sending the same page, let it be done in precopy, or
     * we could send the same page in two channels and none of them will
     * receive the whole page.
     */
    if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
        trace_postcopy_preempt_hit(pss->block->idstr,
                                   pss->page << TARGET_PAGE_BITS);
        return 0;
    }

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        if (page_dirty) {
            /* Be strict to return code; it must be 1, or what else? */
            if (migration_ops->ram_save_target_page(rs, pss) != 1) {
                error_report_once("%s: ram_save_target_page failed", __func__);
                ret = -1;
                goto out;
            }
            sent = true;
        }
        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));
out:
    pss_host_page_finish(pss);
    /* For urgent requests, flush immediately if sent */
    if (sent) {
        qemu_fflush(pss->pss_channel);
    }
    return ret;
}
/**
 * ram_save_host_page: save a whole host page
 *
 * Starting at *offset send pages up to the end of the current host
 * page. It's valid for the initial offset to point into the middle of
 * a host page in which case the remainder of the host page is sent.
 * Only dirty target pages are sent. Note that the host page size may
 * be a huge page for this block.
 *
 * The saving stops at the boundary of the used_length of the block
 * if the RAMBlock isn't a multiple of the host page size.
 *
 * The caller must hold ram_state.bitmap_mutex when calling this
 * function. Note that the function can temporarily release the lock, but
 * it will re-acquire it before returning.
 *
 * Returns the number of pages written or negative on error
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 */
static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
{
    bool page_dirty, preempt_active = postcopy_preempt_active();
    int tmppages, pages = 0;
    size_t pagesize_bits =
        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
    unsigned long start_page = pss->page;
    int res;

    if (migrate_ram_is_ignored(pss->block)) {
        error_report("block %s should not be migrated !", pss->block->idstr);
        return 0;
    }

    /* Update host page boundary information */
    pss_host_page_prepare(pss);

    do {
        page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);

        /* Check if the page is dirty and if so send it */
        if (page_dirty) {
            /*
             * Properly yield the lock only in postcopy preempt mode
             * because both migration thread and rp-return thread can
             * operate on the bitmaps.
             */
            if (preempt_active) {
                qemu_mutex_unlock(&rs->bitmap_mutex);
            }
            tmppages = migration_ops->ram_save_target_page(rs, pss);
            if (tmppages >= 0) {
                pages += tmppages;
                /*
                 * Allow rate limiting to happen in the middle of huge pages if
                 * something is sent in the current iteration.
                 */
                if (pagesize_bits > 1 && tmppages > 0) {
                    migration_rate_limit();
                }
            }
            if (preempt_active) {
                qemu_mutex_lock(&rs->bitmap_mutex);
            }
        } else {
            tmppages = 0;
        }

        if (tmppages < 0) {
            pss_host_page_finish(pss);
            return tmppages;
        }

        pss_find_next_dirty(pss);
    } while (pss_within_range(pss));

    pss_host_page_finish(pss);

    res = ram_save_release_protection(rs, pss, start_page);
    return (res < 0 ? res : pages);
}
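/*
 * Illustrative note (not from the original source): with a 2 MiB hugepage
 * block, pagesize_bits is 512, so ram_save_host_page() may invoke
 * migration_rate_limit() once per dirty target page, i.e. up to 512 times
 * while walking a single host page.  bitmap_mutex is only dropped around
 * ram_save_target_page() when postcopy preemption is active, which is what
 * allows the return-path thread to inject urgent pages mid-hugepage.
 */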
/**
 * ram_find_and_save_block: finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns the number of pages written where zero means no dirty pages,
 * or negative on error
 *
 * @rs: current RAM state
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */
static int ram_find_and_save_block(RAMState *rs)
{
    PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
    int pages = 0;

    /* No dirty page as there is zero RAM */
    if (!rs->ram_bytes_total) {
        return pages;
    }

    /*
     * Always keep last_seen_block/last_page valid during this procedure,
     * because find_dirty_block() relies on these values (e.g., we compare
     * last_seen_block with pss.block to see whether we searched all the
     * ramblocks) to detect the completion of migration. Having NULL value
     * of last_seen_block can conditionally cause below loop to run forever.
     */
    if (!rs->last_seen_block) {
        rs->last_seen_block = QLIST_FIRST_RCU(&ram_list.blocks);
        rs->last_page = 0;
    }

    pss_init(pss, rs->last_seen_block, rs->last_page);

    while (true) {
        if (!get_queued_page(rs, pss)) {
            /* priority queue empty, so just search for something dirty */
            int res = find_dirty_block(rs, pss);
            if (res != PAGE_DIRTY_FOUND) {
                if (res == PAGE_ALL_CLEAN) {
                    break;
                } else if (res == PAGE_TRY_AGAIN) {
                    continue;
                } else if (res < 0) {
                    pages = res;
                    break;
                }
            }
        }
        pages = ram_save_host_page(rs, pss);
        if (pages) {
            break;
        }
    }

    rs->last_seen_block = pss->block;
    rs->last_page = pss->page;

    return pages;
}
static uint64_t ram_bytes_total_with_ignored(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        total += block->used_length;
    }
    return total;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        total += block->used_length;
    }
    return total;
}
static void xbzrle_load_setup(void)
{
    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}

static void xbzrle_load_cleanup(void)
{
    g_free(XBZRLE.decoded_buf);
    XBZRLE.decoded_buf = NULL;
}

static void ram_state_cleanup(RAMState **rsp)
{
    if (*rsp) {
        migration_page_queue_free(*rsp);
        qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
        qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
        g_free(*rsp);
        *rsp = NULL;
    }
}
static void xbzrle_cleanup(void)
{
    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.zero_target_page);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
        XBZRLE.zero_target_page = NULL;
    }
    XBZRLE_cache_unlock();
}
static void ram_bitmaps_destroy(void)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->clear_bmap);
        block->clear_bmap = NULL;
        g_free(block->bmap);
        block->bmap = NULL;
        g_free(block->file_bmap);
        block->file_bmap = NULL;
    }
}
static void ram_save_cleanup(void *opaque)
{
    RAMState **rsp = opaque;

    /* We don't use dirty log with background snapshots */
    if (!migrate_background_snapshot()) {
        /*
         * The caller holds the BQL or is in a bottom half, so there is
         * no writing race against the migration bitmap.
         */
        if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
            /*
             * Do not stop dirty log without starting it, since
             * memory_global_dirty_log_stop will assert that
             * memory_global_dirty_log_start/stop are used in pairs.
             */
            memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
        }
    }
    ram_bitmaps_destroy();

    multifd_ram_save_cleanup();
    ram_state_cleanup(rsp);
    g_free(migration_ops);
    migration_ops = NULL;
}
static void ram_state_reset(RAMState *rs)
{
    int i;

    for (i = 0; i < RAM_CHANNEL_MAX; i++) {
        rs->pss[i].last_sent_block = NULL;
    }

    rs->last_seen_block = NULL;
    rs->last_page = 0;
    rs->last_version = ram_list.version;
    rs->xbzrle_started = false;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

/* **** functions for postcopy ***** */
void ram_postcopy_migrated_memory_release(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        unsigned long *bitmap = block->bmap;
        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);

        while (run_start < range) {
            unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
            ram_discard_range(block->idstr,
                              ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
                              ((ram_addr_t)(run_end - run_start))
                                << TARGET_PAGE_BITS);
            run_start = find_next_zero_bit(bitmap, range, run_end + 1);
        }
    }
}
/**
 * postcopy_send_discard_bm_ram: discard a RAMBlock
 *
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 *
 * @ms: current migration state
 * @block: RAMBlock to discard
 */
static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
{
    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
    unsigned long current;
    unsigned long *bitmap = block->bmap;

    for (current = 0; current < end; ) {
        unsigned long one = find_next_bit(bitmap, end, current);
        unsigned long zero, discard_length;

        if (one >= end) {
            break;
        }

        zero = find_next_zero_bit(bitmap, end, one + 1);

        if (zero >= end) {
            discard_length = end - one;
        } else {
            discard_length = zero - one;
        }
        postcopy_discard_send_range(ms, one, discard_length);
        current = one + discard_length;
    }
}
static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);

/**
 * postcopy_each_ram_send_discard: discard all RAMBlocks
 *
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 *
 * @ms: current migration state
 */
static void postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        postcopy_discard_send_init(ms, block->idstr);

        /*
         * Deal with TPS != HPS and huge pages. It discards any partially
         * sent host-page size chunks and marks any partially dirty
         * host-page size chunks as fully dirty. In this case the host-page
         * is the host-page for the particular RAMBlock, i.e. it might be a
         * huge page.
         */
        postcopy_chunk_hostpages_pass(ms, block);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        postcopy_send_discard_bm_ram(ms, block);
        postcopy_discard_send_finish(ms);
    }
}
2505 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2507 * Helper for postcopy_chunk_hostpages; it's called twice to
2508 * canonicalize the two bitmaps, that are similar, but one is
2511 * Postcopy requires that all target pages in a hostpage are dirty or
2512 * clean, not a mix. This function canonicalizes the bitmaps.
2514 * @ms: current migration state
2515 * @block: block that contains the page we want to canonicalize
2517 static void postcopy_chunk_hostpages_pass(MigrationState
*ms
, RAMBlock
*block
)
2519 RAMState
*rs
= ram_state
;
2520 unsigned long *bitmap
= block
->bmap
;
2521 unsigned int host_ratio
= block
->page_size
/ TARGET_PAGE_SIZE
;
2522 unsigned long pages
= block
->used_length
>> TARGET_PAGE_BITS
;
2523 unsigned long run_start
;
2525 if (block
->page_size
== TARGET_PAGE_SIZE
) {
2526 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2530 /* Find a dirty page */
2531 run_start
= find_next_bit(bitmap
, pages
, 0);
2533 while (run_start
< pages
) {
2536 * If the start of this run of pages is in the middle of a host
2537 * page, then we need to fixup this host page.
2539 if (QEMU_IS_ALIGNED(run_start
, host_ratio
)) {
2540 /* Find the end of this run */
2541 run_start
= find_next_zero_bit(bitmap
, pages
, run_start
+ 1);
2543 * If the end isn't at the start of a host page, then the
2544 * run doesn't finish at the end of a host page
2545 * and we need to discard.
2549 if (!QEMU_IS_ALIGNED(run_start
, host_ratio
)) {
2551 unsigned long fixup_start_addr
= QEMU_ALIGN_DOWN(run_start
,
2553 run_start
= QEMU_ALIGN_UP(run_start
, host_ratio
);
2555 /* Clean up the bitmap */
2556 for (page
= fixup_start_addr
;
2557 page
< fixup_start_addr
+ host_ratio
; page
++) {
2559 * Remark them as dirty, updating the count for any pages
2560 * that weren't previously dirty.
2562 rs
->migration_dirty_pages
+= !test_and_set_bit(page
, bitmap
);
2566 /* Find the next dirty page for the next iteration */
2567 run_start
= find_next_bit(bitmap
, pages
, run_start
);
/**
 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
 *
 * Transmit the set of pages to be discarded after precopy to the target
 * these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted, this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 *
 * @ms: current migration state
 */
void ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    RAMState *rs = ram_state;

    RCU_READ_LOCK_GUARD();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync(rs, false);

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
    rs->last_seen_block = NULL;
    rs->last_page = 0;

    postcopy_each_ram_send_discard(ms);

    trace_ram_postcopy_send_discard_bitmap();
}
/**
 * ram_discard_range: discard dirtied pages at the beginning of postcopy
 *
 * Returns zero on success
 *
 * @rbname:  name of the RAMBlock of the request. NULL means the
 *           same that last one.
 * @start: RAMBlock starting page
 * @length: RAMBlock size
 */
int ram_discard_range(const char *rbname, uint64_t start, size_t length)
{
    trace_ram_discard_range(rbname, start, length);

    RCU_READ_LOCK_GUARD();
    RAMBlock *rb = qemu_ram_block_by_name(rbname);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'", rbname);
        return -1;
    }

    /*
     * On source VM, we don't need to update the received bitmap since
     * we don't even have one.
     */
    if (rb->receivedmap) {
        bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
                     length >> qemu_target_page_bits());
    }

    return ram_block_discard_range(rb, start, length);
}
2638 * For every allocation, we will try not to crash the VM if the
2639 * allocation failed.
2641 static bool xbzrle_init(Error
**errp
)
2643 if (!migrate_xbzrle()) {
2647 XBZRLE_cache_lock();
2649 XBZRLE
.zero_target_page
= g_try_malloc0(TARGET_PAGE_SIZE
);
2650 if (!XBZRLE
.zero_target_page
) {
2651 error_setg(errp
, "%s: Error allocating zero page", __func__
);
2655 XBZRLE
.cache
= cache_init(migrate_xbzrle_cache_size(),
2656 TARGET_PAGE_SIZE
, errp
);
2657 if (!XBZRLE
.cache
) {
2658 goto free_zero_page
;
2661 XBZRLE
.encoded_buf
= g_try_malloc0(TARGET_PAGE_SIZE
);
2662 if (!XBZRLE
.encoded_buf
) {
2663 error_setg(errp
, "%s: Error allocating encoded_buf", __func__
);
2667 XBZRLE
.current_buf
= g_try_malloc(TARGET_PAGE_SIZE
);
2668 if (!XBZRLE
.current_buf
) {
2669 error_setg(errp
, "%s: Error allocating current_buf", __func__
);
2670 goto free_encoded_buf
;
2673 /* We are all good */
2674 XBZRLE_cache_unlock();
2678 g_free(XBZRLE
.encoded_buf
);
2679 XBZRLE
.encoded_buf
= NULL
;
2681 cache_fini(XBZRLE
.cache
);
2682 XBZRLE
.cache
= NULL
;
2684 g_free(XBZRLE
.zero_target_page
);
2685 XBZRLE
.zero_target_page
= NULL
;
2687 XBZRLE_cache_unlock();
static bool ram_state_init(RAMState **rsp, Error **errp)
{
    *rsp = g_try_new0(RAMState, 1);

    if (!*rsp) {
        error_setg(errp, "%s: Init ramstate fail", __func__);
        return false;
    }

    qemu_mutex_init(&(*rsp)->bitmap_mutex);
    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
    (*rsp)->ram_bytes_total = ram_bytes_total();

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     * This must match with the initial values of dirty bitmap.
     */
    (*rsp)->migration_dirty_pages = (*rsp)->ram_bytes_total >> TARGET_PAGE_BITS;
    ram_state_reset(*rsp);

    return true;
}
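/*
 * Illustrative example (not from the original source): for a guest with
 * 4 GiB of RAM and 4 KiB target pages, ram_bytes_total() returns
 * 0x100000000, so migration_dirty_pages starts at
 * 0x100000000 >> 12 = 1048576 pages, which matches the all-ones initial
 * bitmap set up in ram_list_init_bitmaps().
 */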
2716 static void ram_list_init_bitmaps(void)
2718 MigrationState
*ms
= migrate_get_current();
2720 unsigned long pages
;
2723 /* Skip setting bitmap if there is no RAM */
2724 if (ram_bytes_total()) {
2725 shift
= ms
->clear_bitmap_shift
;
2726 if (shift
> CLEAR_BITMAP_SHIFT_MAX
) {
2727 error_report("clear_bitmap_shift (%u) too big, using "
2728 "max value (%u)", shift
, CLEAR_BITMAP_SHIFT_MAX
);
2729 shift
= CLEAR_BITMAP_SHIFT_MAX
;
2730 } else if (shift
< CLEAR_BITMAP_SHIFT_MIN
) {
2731 error_report("clear_bitmap_shift (%u) too small, using "
2732 "min value (%u)", shift
, CLEAR_BITMAP_SHIFT_MIN
);
2733 shift
= CLEAR_BITMAP_SHIFT_MIN
;
2736 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2737 pages
= block
->max_length
>> TARGET_PAGE_BITS
;
2739 * The initial dirty bitmap for migration must be set with all
2740 * ones to make sure we'll migrate every guest RAM page to
2742 * Here we set RAMBlock.bmap all to 1 because when rebegin a
2743 * new migration after a failed migration, ram_list.
2744 * dirty_memory[DIRTY_MEMORY_MIGRATION] don't include the whole
2747 block
->bmap
= bitmap_new(pages
);
2748 bitmap_set(block
->bmap
, 0, pages
);
2749 if (migrate_mapped_ram()) {
2750 block
->file_bmap
= bitmap_new(pages
);
2752 block
->clear_bmap_shift
= shift
;
2753 block
->clear_bmap
= bitmap_new(clear_bmap_size(pages
, shift
));
2758 static void migration_bitmap_clear_discarded_pages(RAMState
*rs
)
2760 unsigned long pages
;
2763 RCU_READ_LOCK_GUARD();
2765 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
2766 pages
= ramblock_dirty_bitmap_clear_discarded_pages(rb
);
2767 rs
->migration_dirty_pages
-= pages
;
2771 static bool ram_init_bitmaps(RAMState
*rs
, Error
**errp
)
2775 qemu_mutex_lock_ramlist();
2777 WITH_RCU_READ_LOCK_GUARD() {
2778 ram_list_init_bitmaps();
2779 /* We don't use dirty log with background snapshots */
2780 if (!migrate_background_snapshot()) {
2781 ret
= memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION
, errp
);
2785 migration_bitmap_sync_precopy(rs
, false);
2789 qemu_mutex_unlock_ramlist();
2792 ram_bitmaps_destroy();
2797 * After an eventual first bitmap sync, fixup the initial bitmap
2798 * containing all 1s to exclude any discarded pages from migration.
2800 migration_bitmap_clear_discarded_pages(rs
);
2804 static int ram_init_all(RAMState
**rsp
, Error
**errp
)
2806 if (!ram_state_init(rsp
, errp
)) {
2810 if (!xbzrle_init(errp
)) {
2811 ram_state_cleanup(rsp
);
2815 if (!ram_init_bitmaps(*rsp
, errp
)) {
2822 static void ram_state_resume_prepare(RAMState
*rs
, QEMUFile
*out
)
2828 * Postcopy is not using xbzrle/compression, so no need for that.
2829 * Also, since source are already halted, we don't need to care
2830 * about dirty page logging as well.
2833 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
2834 pages
+= bitmap_count_one(block
->bmap
,
2835 block
->used_length
>> TARGET_PAGE_BITS
);
2838 /* This may not be aligned with current bitmaps. Recalculate. */
2839 rs
->migration_dirty_pages
= pages
;
2841 ram_state_reset(rs
);
2843 /* Update RAMState cache of output QEMUFile */
2844 rs
->pss
[RAM_CHANNEL_PRECOPY
].pss_channel
= out
;
2846 trace_ram_state_resume_prepare(pages
);
2850 * This function clears bits of the free pages reported by the caller from the
2851 * migration dirty bitmap. @addr is the host address corresponding to the
2852 * start of the continuous guest free pages, and @len is the total bytes of
2855 void qemu_guest_free_page_hint(void *addr
, size_t len
)
2859 size_t used_len
, start
, npages
;
2861 /* This function is currently expected to be used during live migration */
2862 if (!migration_is_setup_or_active()) {
2866 for (; len
> 0; len
-= used_len
, addr
+= used_len
) {
2867 block
= qemu_ram_block_from_host(addr
, false, &offset
);
2868 if (unlikely(!block
|| offset
>= block
->used_length
)) {
2870 * The implementation might not support RAMBlock resize during
2871 * live migration, but it could happen in theory with future
2872 * updates. So we add a check here to capture that case.
2874 error_report_once("%s unexpected error", __func__
);
2878 if (len
<= block
->used_length
- offset
) {
2881 used_len
= block
->used_length
- offset
;
2884 start
= offset
>> TARGET_PAGE_BITS
;
2885 npages
= used_len
>> TARGET_PAGE_BITS
;
2887 qemu_mutex_lock(&ram_state
->bitmap_mutex
);
2889 * The skipped free pages are equavalent to be sent from clear_bmap's
2890 * perspective, so clear the bits from the memory region bitmap which
2891 * are initially set. Otherwise those skipped pages will be sent in
2892 * the next round after syncing from the memory region bitmap.
2894 migration_clear_memory_region_dirty_bitmap_range(block
, start
, npages
);
2895 ram_state
->migration_dirty_pages
-=
2896 bitmap_count_one_with_offset(block
->bmap
, start
, npages
);
2897 bitmap_clear(block
->bmap
, start
, npages
);
2898 qemu_mutex_unlock(&ram_state
->bitmap_mutex
);
#define MAPPED_RAM_HDR_VERSION 1
struct MappedRamHeader {
    uint32_t version;
    /*
     * The target's page size, so we know how many pages are in the
     * bitmap.
     */
    uint64_t page_size;
    /*
     * The offset in the migration file where the pages bitmap is
     * stored.
     */
    uint64_t bitmap_offset;
    /*
     * The offset in the migration file where the actual pages (data)
     * are stored.
     */
    uint64_t pages_offset;
} QEMU_PACKED;
typedef struct MappedRamHeader MappedRamHeader;
static void mapped_ram_setup_ramblock(QEMUFile *file, RAMBlock *block)
{
    g_autofree MappedRamHeader *header = NULL;
    size_t header_size, bitmap_size;
    long num_pages;

    header = g_new0(MappedRamHeader, 1);
    header_size = sizeof(MappedRamHeader);

    num_pages = block->used_length >> TARGET_PAGE_BITS;
    bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

    /*
     * Save the file offsets of where the bitmap and the pages should
     * go as they are written at the end of migration and during the
     * iterative phase, respectively.
     */
    block->bitmap_offset = qemu_get_offset(file) + header_size;
    block->pages_offset = ROUND_UP(block->bitmap_offset +
                                   bitmap_size,
                                   MAPPED_RAM_FILE_OFFSET_ALIGNMENT);

    header->version = cpu_to_be32(MAPPED_RAM_HDR_VERSION);
    header->page_size = cpu_to_be64(TARGET_PAGE_SIZE);
    header->bitmap_offset = cpu_to_be64(block->bitmap_offset);
    header->pages_offset = cpu_to_be64(block->pages_offset);

    qemu_put_buffer(file, (uint8_t *) header, header_size);

    /* prepare offset for next ramblock */
    qemu_set_offset(file, block->pages_offset + block->used_length, SEEK_SET);
}
static bool mapped_ram_read_header(QEMUFile *file, MappedRamHeader *header,
                                   Error **errp)
{
    size_t ret, header_size = sizeof(MappedRamHeader);

    ret = qemu_get_buffer(file, (uint8_t *)header, header_size);
    if (ret != header_size) {
        error_setg(errp, "Could not read whole mapped-ram migration header "
                   "(expected %zd, got %zd bytes)", header_size, ret);
        return false;
    }

    /* migration stream is big-endian */
    header->version = be32_to_cpu(header->version);

    if (header->version > MAPPED_RAM_HDR_VERSION) {
        error_setg(errp, "Migration mapped-ram capability version not "
                   "supported (expected <= %d, got %d)", MAPPED_RAM_HDR_VERSION,
                   header->version);
        return false;
    }

    header->page_size = be64_to_cpu(header->page_size);
    header->bitmap_offset = be64_to_cpu(header->bitmap_offset);
    header->pages_offset = be64_to_cpu(header->pages_offset);

    return true;
}
2986 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
2987 * long-running RCU critical section. When rcu-reclaims in the code
2988 * start to become numerous it will be necessary to reduce the
2989 * granularity of these critical sections.
2993 * ram_save_setup: Setup RAM for migration
2995 * Returns zero to indicate success and negative for error
2997 * @f: QEMUFile where to send the data
2998 * @opaque: RAMState pointer
2999 * @errp: pointer to Error*, to store an error if it happens.
3001 static int ram_save_setup(QEMUFile
*f
, void *opaque
, Error
**errp
)
3003 RAMState
**rsp
= opaque
;
3005 int ret
, max_hg_page_size
;
3007 /* migration has already setup the bitmap, reuse it. */
3008 if (!migration_in_colo_state()) {
3009 if (ram_init_all(rsp
, errp
) != 0) {
3013 (*rsp
)->pss
[RAM_CHANNEL_PRECOPY
].pss_channel
= f
;
3016 * ??? Mirrors the previous value of qemu_host_page_size,
3017 * but is this really what was intended for the migration?
3019 max_hg_page_size
= MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE
);
3021 WITH_RCU_READ_LOCK_GUARD() {
3022 qemu_put_be64(f
, ram_bytes_total_with_ignored()
3023 | RAM_SAVE_FLAG_MEM_SIZE
);
3025 RAMBLOCK_FOREACH_MIGRATABLE(block
) {
3026 qemu_put_byte(f
, strlen(block
->idstr
));
3027 qemu_put_buffer(f
, (uint8_t *)block
->idstr
, strlen(block
->idstr
));
3028 qemu_put_be64(f
, block
->used_length
);
3029 if (migrate_postcopy_ram() &&
3030 block
->page_size
!= max_hg_page_size
) {
3031 qemu_put_be64(f
, block
->page_size
);
3033 if (migrate_ignore_shared()) {
3034 qemu_put_be64(f
, block
->mr
->addr
);
3037 if (migrate_mapped_ram()) {
3038 mapped_ram_setup_ramblock(f
, block
);
3043 ret
= rdma_registration_start(f
, RAM_CONTROL_SETUP
);
3045 error_setg(errp
, "%s: failed to start RDMA registration", __func__
);
3046 qemu_file_set_error(f
, ret
);
3050 ret
= rdma_registration_stop(f
, RAM_CONTROL_SETUP
);
3052 error_setg(errp
, "%s: failed to stop RDMA registration", __func__
);
3053 qemu_file_set_error(f
, ret
);
3057 migration_ops
= g_malloc0(sizeof(MigrationOps
));
3059 if (migrate_multifd()) {
3060 multifd_ram_save_setup();
3061 migration_ops
->ram_save_target_page
= ram_save_target_page_multifd
;
3063 migration_ops
->ram_save_target_page
= ram_save_target_page_legacy
;
3067 ret
= multifd_ram_flush_and_sync();
3070 error_setg(errp
, "%s: multifd synchronization failed", __func__
);
3074 if (migrate_multifd() && !migrate_multifd_flush_after_each_section()
3075 && !migrate_mapped_ram()) {
3076 qemu_put_be64(f
, RAM_SAVE_FLAG_MULTIFD_FLUSH
);
3079 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3080 ret
= qemu_fflush(f
);
3082 error_setg_errno(errp
, -ret
, "%s failed", __func__
);
static void ram_save_file_bmap(QEMUFile *f)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        long num_pages = block->used_length >> TARGET_PAGE_BITS;
        long bitmap_size = BITS_TO_LONGS(num_pages) * sizeof(unsigned long);

        qemu_put_buffer_at(f, (uint8_t *)block->file_bmap, bitmap_size,
                           block->bitmap_offset);
        ram_transferred_add(bitmap_size);

        /*
         * Free the bitmap here to catch any synchronization issues
         * with multifd channels. No channels should be sending pages
         * after we've written the bitmap to file.
         */
        g_free(block->file_bmap);
        block->file_bmap = NULL;
    }
}

void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set)
{
    if (set) {
        set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
    } else {
        clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
    }
}
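/*
 * Usage sketch (hypothetical caller, for illustration only): a multifd send
 * thread that has just written the page at `offset' into the file would mark
 * it present with
 *
 *     ramblock_set_file_bmap_atomic(block, offset, true);
 *
 * The atomic bit operations let several channels update the same file_bmap
 * word concurrently without taking ram_state->bitmap_mutex.
 */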
3119 * ram_save_iterate: iterative stage for migration
3121 * Returns zero to indicate success and negative for error
3123 * @f: QEMUFile where to send the data
3124 * @opaque: RAMState pointer
3126 static int ram_save_iterate(QEMUFile
*f
, void *opaque
)
3128 RAMState
**temp
= opaque
;
3129 RAMState
*rs
= *temp
;
3136 * We'll take this lock a little bit long, but it's okay for two reasons.
3137 * Firstly, the only possible other thread to take it is who calls
3138 * qemu_guest_free_page_hint(), which should be rare; secondly, see
3139 * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
3140 * guarantees that we'll at least released it in a regular basis.
3142 WITH_QEMU_LOCK_GUARD(&rs
->bitmap_mutex
) {
3143 WITH_RCU_READ_LOCK_GUARD() {
3144 if (ram_list
.version
!= rs
->last_version
) {
3145 ram_state_reset(rs
);
3148 /* Read version before ram_list.blocks */
3151 ret
= rdma_registration_start(f
, RAM_CONTROL_ROUND
);
3153 qemu_file_set_error(f
, ret
);
3157 t0
= qemu_clock_get_ns(QEMU_CLOCK_REALTIME
);
3159 while ((ret
= migration_rate_exceeded(f
)) == 0 ||
3160 postcopy_has_request(rs
)) {
3163 if (qemu_file_get_error(f
)) {
3167 pages
= ram_find_and_save_block(rs
);
3168 /* no more pages to sent */
3175 qemu_file_set_error(f
, pages
);
3179 rs
->target_page_count
+= pages
;
3182 * we want to check in the 1st loop, just in case it was the 1st
3183 * time and we had to sync the dirty bitmap.
3184 * qemu_clock_get_ns() is a bit expensive, so we only check each
3187 if ((i
& 63) == 0) {
3188 uint64_t t1
= (qemu_clock_get_ns(QEMU_CLOCK_REALTIME
) - t0
) /
3190 if (t1
> MAX_WAIT
) {
3191 trace_ram_save_iterate_big_wait(t1
, i
);
3201 * Must occur before EOS (or any QEMUFile operation)
3202 * because of RDMA protocol.
3204 ret
= rdma_registration_stop(f
, RAM_CONTROL_ROUND
);
3206 qemu_file_set_error(f
, ret
);
3211 && migration_is_setup_or_active()) {
3212 if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
3213 !migrate_mapped_ram()) {
3214 ret
= multifd_ram_flush_and_sync();
3220 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3221 ram_transferred_add(8);
3222 ret
= qemu_fflush(f
);
3232 * ram_save_complete: function called to send the remaining amount of ram
3234 * Returns zero to indicate success or negative on error
3236 * Called with the BQL
3238 * @f: QEMUFile where to send the data
3239 * @opaque: RAMState pointer
3241 static int ram_save_complete(QEMUFile
*f
, void *opaque
)
3243 RAMState
**temp
= opaque
;
3244 RAMState
*rs
= *temp
;
3247 rs
->last_stage
= !migration_in_colo_state();
3249 WITH_RCU_READ_LOCK_GUARD() {
3250 if (!migration_in_postcopy()) {
3251 migration_bitmap_sync_precopy(rs
, true);
3254 ret
= rdma_registration_start(f
, RAM_CONTROL_FINISH
);
3256 qemu_file_set_error(f
, ret
);
3260 /* try transferring iterative blocks of memory */
3262 /* flush all remaining blocks regardless of rate limiting */
3263 qemu_mutex_lock(&rs
->bitmap_mutex
);
3267 pages
= ram_find_and_save_block(rs
);
3268 /* no more blocks to sent */
3273 qemu_mutex_unlock(&rs
->bitmap_mutex
);
3277 qemu_mutex_unlock(&rs
->bitmap_mutex
);
3279 ret
= rdma_registration_stop(f
, RAM_CONTROL_FINISH
);
3281 qemu_file_set_error(f
, ret
);
3286 ret
= multifd_ram_flush_and_sync();
3291 if (migrate_mapped_ram()) {
3292 ram_save_file_bmap(f
);
3294 if (qemu_file_get_error(f
)) {
3295 Error
*local_err
= NULL
;
3296 int err
= qemu_file_get_error_obj(f
, &local_err
);
3298 error_reportf_err(local_err
, "Failed to write bitmap to file: ");
3303 qemu_put_be64(f
, RAM_SAVE_FLAG_EOS
);
3304 return qemu_fflush(f
);
3307 static void ram_state_pending_estimate(void *opaque
, uint64_t *must_precopy
,
3308 uint64_t *can_postcopy
)
3310 RAMState
**temp
= opaque
;
3311 RAMState
*rs
= *temp
;
3313 uint64_t remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
3315 if (migrate_postcopy_ram()) {
3316 /* We can do postcopy, and all the data is postcopiable */
3317 *can_postcopy
+= remaining_size
;
3319 *must_precopy
+= remaining_size
;
3323 static void ram_state_pending_exact(void *opaque
, uint64_t *must_precopy
,
3324 uint64_t *can_postcopy
)
3326 RAMState
**temp
= opaque
;
3327 RAMState
*rs
= *temp
;
3328 uint64_t remaining_size
;
3330 if (!migration_in_postcopy()) {
3332 WITH_RCU_READ_LOCK_GUARD() {
3333 migration_bitmap_sync_precopy(rs
, false);
3338 remaining_size
= rs
->migration_dirty_pages
* TARGET_PAGE_SIZE
;
3340 if (migrate_postcopy_ram()) {
3341 /* We can do postcopy, and all the data is postcopiable */
3342 *can_postcopy
+= remaining_size
;
3344 *must_precopy
+= remaining_size
;
3348 static int load_xbzrle(QEMUFile
*f
, ram_addr_t addr
, void *host
)
3350 unsigned int xh_len
;
3352 uint8_t *loaded_data
;
3354 /* extract RLE header */
3355 xh_flags
= qemu_get_byte(f
);
3356 xh_len
= qemu_get_be16(f
);
3358 if (xh_flags
!= ENCODING_FLAG_XBZRLE
) {
3359 error_report("Failed to load XBZRLE page - wrong compression!");
3363 if (xh_len
> TARGET_PAGE_SIZE
) {
3364 error_report("Failed to load XBZRLE page - len overflow!");
3367 loaded_data
= XBZRLE
.decoded_buf
;
3368 /* load data and decode */
3369 /* it can change loaded_data to point to an internal buffer */
3370 qemu_get_buffer_in_place(f
, &loaded_data
, xh_len
);
3373 if (xbzrle_decode_buffer(loaded_data
, xh_len
, host
,
3374 TARGET_PAGE_SIZE
) == -1) {
3375 error_report("Failed to load XBZRLE page - decode error!");
3383 * ram_block_from_stream: read a RAMBlock id from the migration stream
3385 * Must be called from within a rcu critical section.
3387 * Returns a pointer from within the RCU-protected ram_list.
3389 * @mis: the migration incoming state pointer
3390 * @f: QEMUFile where to read the data from
3391 * @flags: Page flags (mostly to see if it's a continuation of previous block)
3392 * @channel: the channel we're using
3394 static inline RAMBlock
*ram_block_from_stream(MigrationIncomingState
*mis
,
3395 QEMUFile
*f
, int flags
,
3398 RAMBlock
*block
= mis
->last_recv_block
[channel
];
3402 if (flags
& RAM_SAVE_FLAG_CONTINUE
) {
3404 error_report("Ack, bad migration stream!");
3410 len
= qemu_get_byte(f
);
3411 qemu_get_buffer(f
, (uint8_t *)id
, len
);
3414 block
= qemu_ram_block_by_name(id
);
3416 error_report("Can't find block %s", id
);
3420 if (migrate_ram_is_ignored(block
)) {
3421 error_report("block %s should not be migrated !", id
);
3425 mis
->last_recv_block
[channel
] = block
;
3430 static inline void *host_from_ram_block_offset(RAMBlock
*block
,
3433 if (!offset_in_ramblock(block
, offset
)) {
3437 return block
->host
+ offset
;
3440 static void *host_page_from_ram_block_offset(RAMBlock
*block
,
3443 /* Note: Explicitly no check against offset_in_ramblock(). */
3444 return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block
->host
+ offset
),
3448 static ram_addr_t
host_page_offset_from_ram_block_offset(RAMBlock
*block
,
3451 return ((uintptr_t)block
->host
+ offset
) & (block
->page_size
- 1);
3454 void colo_record_bitmap(RAMBlock
*block
, ram_addr_t
*normal
, uint32_t pages
)
3456 qemu_mutex_lock(&ram_state
->bitmap_mutex
);
3457 for (int i
= 0; i
< pages
; i
++) {
3458 ram_addr_t offset
= normal
[i
];
3459 ram_state
->migration_dirty_pages
+= !test_and_set_bit(
3460 offset
>> TARGET_PAGE_BITS
,
3463 qemu_mutex_unlock(&ram_state
->bitmap_mutex
);
3466 static inline void *colo_cache_from_block_offset(RAMBlock
*block
,
3467 ram_addr_t offset
, bool record_bitmap
)
3469 if (!offset_in_ramblock(block
, offset
)) {
3472 if (!block
->colo_cache
) {
3473 error_report("%s: colo_cache is NULL in block :%s",
3474 __func__
, block
->idstr
);
3479 * During colo checkpoint, we need bitmap of these migrated pages.
3480 * It help us to decide which pages in ram cache should be flushed
3481 * into VM's RAM later.
3483 if (record_bitmap
) {
3484 colo_record_bitmap(block
, &offset
, 1);
3486 return block
->colo_cache
+ offset
;
/**
 * ram_handle_zero: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @size: size of the zero page
 */
void ram_handle_zero(void *host, uint64_t size)
{
    if (!buffer_is_zero(host, size)) {
        memset(host, 0, size);
    }
}
3506 static void colo_init_ram_state(void)
3508 Error
*local_err
= NULL
;
3510 if (!ram_state_init(&ram_state
, &local_err
)) {
3511 error_report_err(local_err
);
3516 * colo cache: this is for secondary VM, we cache the whole
3517 * memory of the secondary VM, it is need to hold the global lock
3518 * to call this helper.
3520 int colo_init_ram_cache(void)
3524 WITH_RCU_READ_LOCK_GUARD() {
3525 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3526 block
->colo_cache
= qemu_anon_ram_alloc(block
->used_length
,
3527 NULL
, false, false);
3528 if (!block
->colo_cache
) {
3529 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3530 "size 0x" RAM_ADDR_FMT
, __func__
, block
->idstr
,
3531 block
->used_length
);
3532 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3533 if (block
->colo_cache
) {
3534 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3535 block
->colo_cache
= NULL
;
3540 if (!machine_dump_guest_core(current_machine
)) {
3541 qemu_madvise(block
->colo_cache
, block
->used_length
,
3542 QEMU_MADV_DONTDUMP
);
3548 * Record the dirty pages that sent by PVM, we use this dirty bitmap together
3549 * with to decide which page in cache should be flushed into SVM's RAM. Here
3550 * we use the same name 'ram_bitmap' as for migration.
3552 if (ram_bytes_total()) {
3553 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3554 unsigned long pages
= block
->max_length
>> TARGET_PAGE_BITS
;
3555 block
->bmap
= bitmap_new(pages
);
3559 colo_init_ram_state();
3563 /* TODO: duplicated with ram_init_bitmaps */
3564 void colo_incoming_start_dirty_log(void)
3566 RAMBlock
*block
= NULL
;
3567 Error
*local_err
= NULL
;
3569 /* For memory_global_dirty_log_start below. */
3571 qemu_mutex_lock_ramlist();
3573 memory_global_dirty_log_sync(false);
3574 WITH_RCU_READ_LOCK_GUARD() {
3575 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3576 ramblock_sync_dirty_bitmap(ram_state
, block
);
3577 /* Discard this dirty bitmap record */
3578 bitmap_zero(block
->bmap
, block
->max_length
>> TARGET_PAGE_BITS
);
3580 if (!memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION
,
3582 error_report_err(local_err
);
3585 ram_state
->migration_dirty_pages
= 0;
3586 qemu_mutex_unlock_ramlist();
3590 /* It is need to hold the global lock to call this helper */
3591 void colo_release_ram_cache(void)
3595 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION
);
3596 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3597 g_free(block
->bmap
);
3601 WITH_RCU_READ_LOCK_GUARD() {
3602 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3603 if (block
->colo_cache
) {
3604 qemu_anon_ram_free(block
->colo_cache
, block
->used_length
);
3605 block
->colo_cache
= NULL
;
3609 ram_state_cleanup(&ram_state
);
3613 * ram_load_setup: Setup RAM for migration incoming side
3615 * Returns zero to indicate success and negative for error
3617 * @f: QEMUFile where to receive the data
3618 * @opaque: RAMState pointer
3619 * @errp: pointer to Error*, to store an error if it happens.
3621 static int ram_load_setup(QEMUFile
*f
, void *opaque
, Error
**errp
)
3623 xbzrle_load_setup();
3624 ramblock_recv_map_init();
3629 static int ram_load_cleanup(void *opaque
)
3633 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
3634 qemu_ram_block_writeback(rb
);
3637 xbzrle_load_cleanup();
3639 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
3640 g_free(rb
->receivedmap
);
3641 rb
->receivedmap
= NULL
;
3648 * ram_postcopy_incoming_init: allocate postcopy data structures
3650 * Returns 0 for success and negative if there was one error
3652 * @mis: current migration incoming state
3654 * Allocate data structures etc needed by incoming migration with
3655 * postcopy-ram. postcopy-ram's similarly names
3656 * postcopy_ram_incoming_init does the work.
3658 int ram_postcopy_incoming_init(MigrationIncomingState
*mis
)
3660 return postcopy_ram_incoming_init(mis
);
3664 * ram_load_postcopy: load a page in postcopy case
3666 * Returns 0 for success or -errno in case of error
3668 * Called in postcopy mode by ram_load().
3669 * rcu_read_lock is taken prior to this being called.
3671 * @f: QEMUFile where to send the data
3672 * @channel: the channel to use for loading
3674 int ram_load_postcopy(QEMUFile
*f
, int channel
)
3676 int flags
= 0, ret
= 0;
3677 bool place_needed
= false;
3678 bool matches_target_page_size
= false;
3679 MigrationIncomingState
*mis
= migration_incoming_get_current();
3680 PostcopyTmpPage
*tmp_page
= &mis
->postcopy_tmp_pages
[channel
];
3682 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
3684 void *page_buffer
= NULL
;
3685 void *place_source
= NULL
;
3686 RAMBlock
*block
= NULL
;
3689 addr
= qemu_get_be64(f
);
3692 * If qemu file error, we should stop here, and then "addr"
3695 ret
= qemu_file_get_error(f
);
3700 flags
= addr
& ~TARGET_PAGE_MASK
;
3701 addr
&= TARGET_PAGE_MASK
;
3703 trace_ram_load_postcopy_loop(channel
, (uint64_t)addr
, flags
);
3704 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
)) {
3705 block
= ram_block_from_stream(mis
, f
, flags
, channel
);
3712 * Relying on used_length is racy and can result in false positives.
3713 * We might place pages beyond used_length in case RAM was shrunk
3714 * while in postcopy, which is fine - trying to place via
3715 * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
3717 if (!block
->host
|| addr
>= block
->postcopy_length
) {
3718 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
3722 tmp_page
->target_pages
++;
3723 matches_target_page_size
= block
->page_size
== TARGET_PAGE_SIZE
;
3725 * Postcopy requires that we place whole host pages atomically;
3726 * these may be huge pages for RAMBlocks that are backed by
3728 * To make it atomic, the data is read into a temporary page
3729 * that's moved into place later.
3730 * The migration protocol uses, possibly smaller, target-pages
3731 * however the source ensures it always sends all the components
3732 * of a host page in one chunk.
3734 page_buffer
= tmp_page
->tmp_huge_page
+
3735 host_page_offset_from_ram_block_offset(block
, addr
);
3736 /* If all TP are zero then we can optimise the place */
3737 if (tmp_page
->target_pages
== 1) {
3738 tmp_page
->host_addr
=
3739 host_page_from_ram_block_offset(block
, addr
);
3740 } else if (tmp_page
->host_addr
!=
3741 host_page_from_ram_block_offset(block
, addr
)) {
3742 /* not the 1st TP within the HP */
3743 error_report("Non-same host page detected on channel %d: "
3744 "Target host page %p, received host page %p "
3745 "(rb %s offset 0x"RAM_ADDR_FMT
" target_pages %d)",
3746 channel
, tmp_page
->host_addr
,
3747 host_page_from_ram_block_offset(block
, addr
),
3748 block
->idstr
, addr
, tmp_page
->target_pages
);
3754 * If it's the last part of a host page then we place the host
3757 if (tmp_page
->target_pages
==
3758 (block
->page_size
/ TARGET_PAGE_SIZE
)) {
3759 place_needed
= true;
3761 place_source
= tmp_page
->tmp_huge_page
;
3764 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
3765 case RAM_SAVE_FLAG_ZERO
:
3766 ch
= qemu_get_byte(f
);
3768 error_report("Found a zero page with value %d", ch
);
3773 * Can skip to set page_buffer when
3774 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
3776 if (!matches_target_page_size
) {
3777 memset(page_buffer
, ch
, TARGET_PAGE_SIZE
);
3781 case RAM_SAVE_FLAG_PAGE
:
3782 tmp_page
->all_zero
= false;
3783 if (!matches_target_page_size
) {
3784 /* For huge pages, we always use temporary buffer */
3785 qemu_get_buffer(f
, page_buffer
, TARGET_PAGE_SIZE
);
3788 * For small pages that matches target page size, we
3789 * avoid the qemu_file copy. Instead we directly use
3790 * the buffer of QEMUFile to place the page. Note: we
3791 * cannot do any QEMUFile operation before using that
3792 * buffer to make sure the buffer is valid when
3795 qemu_get_buffer_in_place(f
, (uint8_t **)&place_source
,
3799 case RAM_SAVE_FLAG_MULTIFD_FLUSH
:
3800 multifd_recv_sync_main();
3802 case RAM_SAVE_FLAG_EOS
:
3804 if (migrate_multifd() &&
3805 migrate_multifd_flush_after_each_section()) {
3806 multifd_recv_sync_main();
3810 error_report("Unknown combination of migration flags: 0x%x"
3811 " (postcopy mode)", flags
);
3816 /* Detect for any possible file errors */
3817 if (!ret
&& qemu_file_get_error(f
)) {
3818 ret
= qemu_file_get_error(f
);
3821 if (!ret
&& place_needed
) {
3822 if (tmp_page
->all_zero
) {
3823 ret
= postcopy_place_page_zero(mis
, tmp_page
->host_addr
, block
);
3825 ret
= postcopy_place_page(mis
, tmp_page
->host_addr
,
3826 place_source
, block
);
3828 place_needed
= false;
3829 postcopy_temp_page_reset(tmp_page
);
static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
3843 * Flush content of RAM cache into SVM's memory.
3844 * Only flush the pages that be dirtied by PVM or SVM or both.
3846 void colo_flush_ram_cache(void)
3848 RAMBlock
*block
= NULL
;
3851 unsigned long offset
= 0;
3853 memory_global_dirty_log_sync(false);
3854 qemu_mutex_lock(&ram_state
->bitmap_mutex
);
3855 WITH_RCU_READ_LOCK_GUARD() {
3856 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
3857 ramblock_sync_dirty_bitmap(ram_state
, block
);
3861 trace_colo_flush_ram_cache_begin(ram_state
->migration_dirty_pages
);
3862 WITH_RCU_READ_LOCK_GUARD() {
3863 block
= QLIST_FIRST_RCU(&ram_list
.blocks
);
3866 unsigned long num
= 0;
3868 offset
= colo_bitmap_find_dirty(ram_state
, block
, offset
, &num
);
3869 if (!offset_in_ramblock(block
,
3870 ((ram_addr_t
)offset
) << TARGET_PAGE_BITS
)) {
3873 block
= QLIST_NEXT_RCU(block
, next
);
3875 unsigned long i
= 0;
3877 for (i
= 0; i
< num
; i
++) {
3878 migration_bitmap_clear_dirty(ram_state
, block
, offset
+ i
);
3880 dst_host
= block
->host
3881 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3882 src_host
= block
->colo_cache
3883 + (((ram_addr_t
)offset
) << TARGET_PAGE_BITS
);
3884 memcpy(dst_host
, src_host
, TARGET_PAGE_SIZE
* num
);
3889 qemu_mutex_unlock(&ram_state
->bitmap_mutex
);
3890 trace_colo_flush_ram_cache_end();
3893 static size_t ram_load_multifd_pages(void *host_addr
, size_t size
,
3896 MultiFDRecvData
*data
= multifd_get_recv_data();
3898 data
->opaque
= host_addr
;
3899 data
->file_offset
= offset
;
3902 if (!multifd_recv()) {
3909 static bool read_ramblock_mapped_ram(QEMUFile
*f
, RAMBlock
*block
,
3910 long num_pages
, unsigned long *bitmap
,
3914 unsigned long set_bit_idx
, clear_bit_idx
;
3917 size_t read
, unread
, size
;
3919 for (set_bit_idx
= find_first_bit(bitmap
, num_pages
);
3920 set_bit_idx
< num_pages
;
3921 set_bit_idx
= find_next_bit(bitmap
, num_pages
, clear_bit_idx
+ 1)) {
3923 clear_bit_idx
= find_next_zero_bit(bitmap
, num_pages
, set_bit_idx
+ 1);
3925 unread
= TARGET_PAGE_SIZE
* (clear_bit_idx
- set_bit_idx
);
3926 offset
= set_bit_idx
<< TARGET_PAGE_BITS
;
3928 while (unread
> 0) {
3929 host
= host_from_ram_block_offset(block
, offset
);
3931 error_setg(errp
, "page outside of ramblock %s range",
3936 size
= MIN(unread
, MAPPED_RAM_LOAD_BUF_SIZE
);
3938 if (migrate_multifd()) {
3939 read
= ram_load_multifd_pages(host
, size
,
3940 block
->pages_offset
+ offset
);
3942 read
= qemu_get_buffer_at(f
, host
, size
,
3943 block
->pages_offset
+ offset
);
3957 qemu_file_get_error_obj(f
, errp
);
3958 error_prepend(errp
, "(%s) failed to read page " RAM_ADDR_FMT
3959 "from file offset %" PRIx64
": ", block
->idstr
, offset
,
3960 block
->pages_offset
+ offset
);
3964 static void parse_ramblock_mapped_ram(QEMUFile
*f
, RAMBlock
*block
,
3965 ram_addr_t length
, Error
**errp
)
3967 g_autofree
unsigned long *bitmap
= NULL
;
3968 MappedRamHeader header
;
3972 if (!mapped_ram_read_header(f
, &header
, errp
)) {
3976 block
->pages_offset
= header
.pages_offset
;
3979 * Check the alignment of the file region that contains pages. We
3980 * don't enforce MAPPED_RAM_FILE_OFFSET_ALIGNMENT to allow that
3981 * value to change in the future. Do only a sanity check with page
3984 if (!QEMU_IS_ALIGNED(block
->pages_offset
, TARGET_PAGE_SIZE
)) {
3986 "Error reading ramblock %s pages, region has bad alignment",
3991 num_pages
= length
/ header
.page_size
;
3992 bitmap_size
= BITS_TO_LONGS(num_pages
) * sizeof(unsigned long);
3994 bitmap
= g_malloc0(bitmap_size
);
3995 if (qemu_get_buffer_at(f
, (uint8_t *)bitmap
, bitmap_size
,
3996 header
.bitmap_offset
) != bitmap_size
) {
3997 error_setg(errp
, "Error reading dirty bitmap");
4001 if (!read_ramblock_mapped_ram(f
, block
, num_pages
, bitmap
, errp
)) {
4005 /* Skip pages array */
4006 qemu_set_offset(f
, block
->pages_offset
+ length
, SEEK_SET
);
4011 static int parse_ramblock(QEMUFile
*f
, RAMBlock
*block
, ram_addr_t length
)
4014 /* ADVISE is earlier, it shows the source has the postcopy capability on */
4015 bool postcopy_advised
= migration_incoming_postcopy_advised();
4016 int max_hg_page_size
;
4017 Error
*local_err
= NULL
;
4021 if (migrate_mapped_ram()) {
4022 parse_ramblock_mapped_ram(f
, block
, length
, &local_err
);
4024 error_report_err(local_err
);
4030 if (!qemu_ram_is_migratable(block
)) {
4031 error_report("block %s should not be migrated !", block
->idstr
);
4035 if (length
!= block
->used_length
) {
4036 ret
= qemu_ram_resize(block
, length
, &local_err
);
4038 error_report_err(local_err
);
4044 * ??? Mirrors the previous value of qemu_host_page_size,
4045 * but is this really what was intended for the migration?
4047 max_hg_page_size
= MAX(qemu_real_host_page_size(), TARGET_PAGE_SIZE
);
4049 /* For postcopy we need to check hugepage sizes match */
4050 if (postcopy_advised
&& migrate_postcopy_ram() &&
4051 block
->page_size
!= max_hg_page_size
) {
4052 uint64_t remote_page_size
= qemu_get_be64(f
);
4053 if (remote_page_size
!= block
->page_size
) {
4054 error_report("Mismatched RAM page size %s "
4055 "(local) %zd != %" PRId64
, block
->idstr
,
4056 block
->page_size
, remote_page_size
);
4060 if (migrate_ignore_shared()) {
4061 hwaddr addr
= qemu_get_be64(f
);
4062 if (migrate_ram_is_ignored(block
) &&
4063 block
->mr
->addr
!= addr
) {
4064 error_report("Mismatched GPAs for block %s "
4065 "%" PRId64
"!= %" PRId64
, block
->idstr
,
4066 (uint64_t)addr
, (uint64_t)block
->mr
->addr
);
4070 ret
= rdma_block_notification_handle(f
, block
->idstr
);
4072 qemu_file_set_error(f
, ret
);
4078 static int parse_ramblocks(QEMUFile
*f
, ram_addr_t total_ram_bytes
)
4082 /* Synchronize RAM block list */
4083 while (!ret
&& total_ram_bytes
) {
4087 int len
= qemu_get_byte(f
);
4089 qemu_get_buffer(f
, (uint8_t *)id
, len
);
4091 length
= qemu_get_be64(f
);
4093 block
= qemu_ram_block_by_name(id
);
4095 ret
= parse_ramblock(f
, block
, length
);
4097 error_report("Unknown ramblock \"%s\", cannot accept "
4101 total_ram_bytes
-= length
;
4108 * ram_load_precopy: load pages in precopy case
4110 * Returns 0 for success or -errno in case of error
4112 * Called in precopy mode by ram_load().
4113 * rcu_read_lock is taken prior to this being called.
4115 * @f: QEMUFile where to send the data
4117 static int ram_load_precopy(QEMUFile
*f
)
4119 MigrationIncomingState
*mis
= migration_incoming_get_current();
4120 int flags
= 0, ret
= 0, invalid_flags
= 0, i
= 0;
4122 if (migrate_mapped_ram()) {
4123 invalid_flags
|= (RAM_SAVE_FLAG_HOOK
| RAM_SAVE_FLAG_MULTIFD_FLUSH
|
4124 RAM_SAVE_FLAG_PAGE
| RAM_SAVE_FLAG_XBZRLE
|
4125 RAM_SAVE_FLAG_ZERO
);
4128 while (!ret
&& !(flags
& RAM_SAVE_FLAG_EOS
)) {
4130 void *host
= NULL
, *host_bak
= NULL
;
4134 * Yield periodically to let main loop run, but an iteration of
4135 * the main loop is expensive, so do it each some iterations
4137 if ((i
& 32767) == 0 && qemu_in_coroutine()) {
4138 aio_co_schedule(qemu_get_current_aio_context(),
4139 qemu_coroutine_self());
4140 qemu_coroutine_yield();
4144 addr
= qemu_get_be64(f
);
4145 ret
= qemu_file_get_error(f
);
4147 error_report("Getting RAM address failed");
4151 flags
= addr
& ~TARGET_PAGE_MASK
;
4152 addr
&= TARGET_PAGE_MASK
;
4154 if (flags
& invalid_flags
) {
4155 error_report("Unexpected RAM flags: %d", flags
& invalid_flags
);
4161 if (flags
& (RAM_SAVE_FLAG_ZERO
| RAM_SAVE_FLAG_PAGE
|
4162 RAM_SAVE_FLAG_XBZRLE
)) {
4163 RAMBlock
*block
= ram_block_from_stream(mis
, f
, flags
,
4164 RAM_CHANNEL_PRECOPY
);
4166 host
= host_from_ram_block_offset(block
, addr
);
4168 * After going into COLO stage, we should not load the page
4169 * into SVM's memory directly, we put them into colo_cache firstly.
4170 * NOTE: We need to keep a copy of SVM's ram in colo_cache.
4171 * Previously, we copied all these memory in preparing stage of COLO
4172 * while we need to stop VM, which is a time-consuming process.
4173 * Here we optimize it by a trick, back-up every page while in
4174 * migration process while COLO is enabled, though it affects the
4175 * speed of the migration, but it obviously reduce the downtime of
4176 * back-up all SVM'S memory in COLO preparing stage.
4178 if (migration_incoming_colo_enabled()) {
4179 if (migration_incoming_in_colo_state()) {
4180 /* In COLO stage, put all pages into cache temporarily */
4181 host
= colo_cache_from_block_offset(block
, addr
, true);
4184 * In migration stage but before COLO stage,
4185 * Put all pages into both cache and SVM's memory.
4187 host_bak
= colo_cache_from_block_offset(block
, addr
, false);
4191 error_report("Illegal RAM offset " RAM_ADDR_FMT
, addr
);
4195 if (!migration_incoming_in_colo_state()) {
4196 ramblock_recv_bitmap_set(block
, host
);
4199 trace_ram_load_loop(block
->idstr
, (uint64_t)addr
, flags
, host
);
4202 switch (flags
& ~RAM_SAVE_FLAG_CONTINUE
) {
4203 case RAM_SAVE_FLAG_MEM_SIZE
:
4204 ret
= parse_ramblocks(f
, addr
);
4206 * For mapped-ram migration (to a file) using multifd, we sync
4207 * once and for all here to make sure all tasks we queued to
4208 * multifd threads are completed, so that all the ramblocks
4209 * (including all the guest memory pages within) are fully
4210 * loaded after this sync returns.
4212 if (migrate_mapped_ram()) {
4213 multifd_recv_sync_main();
4217 case RAM_SAVE_FLAG_ZERO
:
4218 ch
= qemu_get_byte(f
);
4220 error_report("Found a zero page with value %d", ch
);
4224 ram_handle_zero(host
, TARGET_PAGE_SIZE
);
4227 case RAM_SAVE_FLAG_PAGE
:
4228 qemu_get_buffer(f
, host
, TARGET_PAGE_SIZE
);
4231 case RAM_SAVE_FLAG_XBZRLE
:
4232 if (load_xbzrle(f
, addr
, host
) < 0) {
4233 error_report("Failed to decompress XBZRLE page at "
4234 RAM_ADDR_FMT
, addr
);
4239 case RAM_SAVE_FLAG_MULTIFD_FLUSH
:
4240 multifd_recv_sync_main();
4242 case RAM_SAVE_FLAG_EOS
:
4244 if (migrate_multifd() &&
4245 migrate_multifd_flush_after_each_section() &&
4247 * Mapped-ram migration flushes once and for all after
4248 * parsing ramblocks. Always ignore EOS for it.
4250 !migrate_mapped_ram()) {
4251 multifd_recv_sync_main();
4254 case RAM_SAVE_FLAG_HOOK
:
4255 ret
= rdma_registration_handle(f
);
4257 qemu_file_set_error(f
, ret
);
4261 error_report("Unknown combination of migration flags: 0x%x", flags
);
4265 ret
= qemu_file_get_error(f
);
4267 if (!ret
&& host_bak
) {
4268 memcpy(host_bak
, host
, TARGET_PAGE_SIZE
);
4275 static int ram_load(QEMUFile
*f
, void *opaque
, int version_id
)
4278 static uint64_t seq_iter
;
4280 * If system is running in postcopy mode, page inserts to host memory must
4283 bool postcopy_running
= postcopy_is_running();
4287 if (version_id
!= 4) {
4292 * This RCU critical section can be very long running.
4293 * When RCU reclaims in the code start to become numerous,
4294 * it will be necessary to reduce the granularity of this
4297 WITH_RCU_READ_LOCK_GUARD() {
4298 if (postcopy_running
) {
4300 * Note! Here RAM_CHANNEL_PRECOPY is the precopy channel of
4301 * postcopy migration, we have another RAM_CHANNEL_POSTCOPY to
4302 * service fast page faults.
4304 ret
= ram_load_postcopy(f
, RAM_CHANNEL_PRECOPY
);
4306 ret
= ram_load_precopy(f
);
4309 trace_ram_load_complete(ret
, seq_iter
);
4314 static bool ram_has_postcopy(void *opaque
)
4317 RAMBLOCK_FOREACH_NOT_IGNORED(rb
) {
4318 if (ramblock_is_pmem(rb
)) {
4319 info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
4320 "is not supported now!", rb
->idstr
, rb
->host
);
4325 return migrate_postcopy_ram();
4328 /* Sync all the dirty bitmap with destination VM. */
4329 static int ram_dirty_bitmap_sync_all(MigrationState
*s
, RAMState
*rs
)
4332 QEMUFile
*file
= s
->to_dst_file
;
4334 trace_ram_dirty_bitmap_sync_start();
4336 qatomic_set(&rs
->postcopy_bmap_sync_requested
, 0);
4337 RAMBLOCK_FOREACH_NOT_IGNORED(block
) {
4338 qemu_savevm_send_recv_bitmap(file
, block
->idstr
);
4339 trace_ram_dirty_bitmap_request(block
->idstr
);
4340 qatomic_inc(&rs
->postcopy_bmap_sync_requested
);
4343 trace_ram_dirty_bitmap_sync_wait();
4345 /* Wait until all the ramblocks' dirty bitmap synced */
4346 while (qatomic_read(&rs
->postcopy_bmap_sync_requested
)) {
4347 if (migration_rp_wait(s
)) {
4352 trace_ram_dirty_bitmap_sync_complete();
/*
 * Read the received bitmap and invert it to become the initial dirty bitmap.
 * This is only used when a postcopy migration is paused and wants to resume
 * from a middle point.
 *
 * Returns true on success, false on error.
 */
bool ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block, Error **errp)
{
    /* from_dst_file is always valid because we're within rp_thread */
    QEMUFile *file = s->rp_state.from_dst_file;
    g_autofree unsigned long *le_bitmap = NULL;
    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;
    RAMState *rs = ram_state;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_setg(errp, "Reload bitmap in incorrect state %s",
                   MigrationStatus_str(s->state));
        return false;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match our ramblock */
    if (size != local_size) {
        error_setg(errp, "ramblock '%s' bitmap size mismatch (0x%"PRIx64
                   " != 0x%"PRIx64")", block->idstr, size, local_size);
        return false;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    if (qemu_file_get_error(file) || size != local_size) {
        error_setg(errp, "read bitmap failed for ramblock '%s': "
                   "(size 0x%"PRIx64", got: 0x%"PRIx64")",
                   block->idstr, local_size, size);
        return false;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_setg(errp, "ramblock '%s' end mark incorrect: 0x%"PRIx64,
                   block->idstr, end_mark);
        return false;
    }

    /*
     * Endianness conversion. We are during postcopy (though paused).
     * The dirty bitmap won't change. We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is the "received bitmap". Invert it to become the
     * initial dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    /* Clear dirty bits of discarded ranges that we don't want to migrate. */
    ramblock_dirty_bitmap_clear_discarded_pages(block);

    /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    qatomic_dec(&rs->postcopy_bmap_sync_requested);

    /*
     * We succeeded in syncing the bitmap for the current ramblock. Always
     * kick the migration thread to check whether all requested bitmaps are
     * reloaded.  NOTE: it's racy to only kick when requested==0, because we
     * don't know whether the migration thread may still be increasing it.
     */
    migration_rp_kick(s);

    return true;
}

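/*
 * resume_prepare handler: before a paused postcopy migration resumes, pull
 * the received bitmaps back from the destination (ram_dirty_bitmap_sync_all()
 * above) and let ram_state_resume_prepare() recompute migration_dirty_pages
 * from the reloaded bitmaps.
 */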
static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

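/*
 * Terminate the postcopy preempt channel cleanly: put a final
 * RAM_SAVE_FLAG_EOS marker on postcopy_qemufile_src and flush it.
 */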
void postcopy_preempt_shutdown_file(MigrationState *s)
{
    qemu_put_be64(s->postcopy_qemufile_src, RAM_SAVE_FLAG_EOS);
    qemu_fflush(s->postcopy_qemufile_src);
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .state_pending_exact = ram_state_pending_exact,
    .state_pending_estimate = ram_state_pending_estimate,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

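/*
 * RAM block resize notifier.  A resize is fatal for an in-flight precopy
 * (block sizes were already sent in the stream), tolerated while postcopy
 * has only been advised (re-discard the grown range and record the new
 * postcopy_length), and ignored once the destination guest is running.
 */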
static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
                                      size_t old_size, size_t new_size)
{
    PostcopyState ps = postcopy_state_get();
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
    Error *err = NULL;

    if (!rb) {
        error_report("RAM block not found");
        return;
    }

    if (migrate_ram_is_ignored(rb)) {
        return;
    }

    if (!migration_is_idle()) {
        /*
         * Precopy code on the source cannot deal with the size of RAM blocks
         * changing at random points in time - especially after sending the
         * RAM block sizes in the migration stream, they must no longer change.
         * Abort and indicate a proper reason.
         */
        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
        migration_cancel(err);
        error_free(err);
    }

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /*
         * Update what ram_postcopy_incoming_init()->init_range() does at the
         * time postcopy was advised. Syncing RAM blocks with the source will
         * result in RAM resizes.
         */
        if (old_size < new_size) {
            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
                error_report("RAM block '%s' discard of resized RAM failed",
                             rb->idstr);
            }
        }
        rb->postcopy_length = new_size;
        break;
    case POSTCOPY_INCOMING_NONE:
    case POSTCOPY_INCOMING_RUNNING:
    case POSTCOPY_INCOMING_END:
        /*
         * Once our guest is running, postcopy no longer cares about
         * resizes. When growing, the new memory was not available on the
         * source, so no handler is needed.
         */
        break;
    default:
        /* We don't expect resizes in any other postcopy state. */
        error_report("RAM block '%s' resized during postcopy state: %d",
                     rb->idstr, ps);
        exit(-1);
    }
}

static RAMBlockNotifier ram_mig_ram_notifier = {
    .ram_block_resized = ram_mig_ram_block_resized,
};

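/*
 * ram_mig_init() registers the "ram" live savevm handlers (instance 0,
 * stream version 4 -- the same version ram_load() insists on), adds the RAM
 * block resize notifier above, and initialises the XBZRLE lock.
 */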
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
    ram_block_notifier_add(&ram_mig_ram_notifier);
}