/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once
     * only maintained and used in postcopy at the moment
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
};

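/*
 * offset_in_ramblock: return true if @offset lies within the used,
 * host-mapped part of RAMBlock @b.
 */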
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ? true : false;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

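/*
 * Illustrative usage (not part of the original header): callers are expected
 * to validate an offset before turning it into a host pointer, e.g.:
 *
 *     if (offset_in_ramblock(block, off)) {
 *         void *p = ramblock_ptr(block, off);
 *         ... read or write through p ...
 *     }
 */
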
long qemu_getrampagesize(void);
unsigned long last_ram_page(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

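/*
 * Dirty memory is tracked per client (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE,
 * DIRTY_MEMORY_MIGRATION) in ram_list.dirty_memory[].  Each client's bitmap
 * is split into DirtyMemoryBlocks of DIRTY_MEMORY_BLOCK_SIZE bits that are
 * looked up with atomic_rcu_read() under the RCU read lock, so the helpers
 * below walk a range one block at a time.
 */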
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);

        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);

        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}

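/*
 * cpu_physical_memory_set_dirty_lebitmap() imports a little-endian dirty
 * bitmap (e.g. a KVM dirty log for the range).  When the start address is
 * word aligned and host and target page sizes match, whole bitmap words are
 * OR-ed directly into the dirty blocks; otherwise each set bit dirties
 * hpratio target pages via cpu_physical_memory_set_dirty_range().
 */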
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

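/*
 * cpu_physical_memory_sync_dirty_bitmap() moves (and clears) the
 * DIRTY_MEMORY_MIGRATION bits covering [start, start + length) of @rb into
 * the block's own rb->bmap.  It returns the number of pages that became
 * newly dirty in rb->bmap and adds the total number of dirty bits seen to
 * *real_dirty_pages.
 */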
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address is aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset)) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}