[qemu/armbru.git] include/exec/ram_addr.h

/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"
struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;              /* host virtual address of the mapping */
    ram_addr_t offset;          /* offset within the ram_addr_t space */
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;                     /* backing file descriptor, if any */
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* Bitmap of pages that haven't been sent even once.  Only
     * maintained and used in postcopy at the moment, where it's used
     * to send the dirty bitmap at the start of the postcopy phase.
     */
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;
};
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ? true : false;
}
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
/* Translate a host address inside @rb into a page index suitable for
 * rb->receivedmap.  */
static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}
bool ramblock_is_pmem(RAMBlock *rb);

long qemu_getrampagesize(void);
/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: specify the properties of the ram block, which can be one of,
 *              or the bit-OR of, the following values:
 *              - RAM_SHARED: mmap the backing file or device with MAP_SHARED
 *              - RAM_PMEM: the backend @mem_path or @fd is persistent memory
 *              Other bits are ignored.
 *  @mem_path or @fd: specify the backing file or device
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd,
                                 Error **errp);
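
/*
 * Illustrative usage sketch (an addition to this copy, not part of the
 * original header); the backing file path is hypothetical:
 *
 *     Error *local_err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED,
 *                                             "/path/to/backing-file",
 *                                             &local_err);
 *     if (!rb) {
 *         error_report_err(local_err);
 *     }
 */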
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
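
/*
 * Added note (not in the original header): each dirty memory client
 * (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION) has its
 * own bitmap, chopped into DirtyMemoryBlocks of DIRTY_MEMORY_BLOCK_SIZE
 * pages each, so that blocks can be replaced under RCU when RAM is
 * resized.  The helpers below therefore decompose a page number as
 *
 *     idx    = page / DIRTY_MEMORY_BLOCK_SIZE;   // which block
 *     offset = page % DIRTY_MEMORY_BLOCK_SIZE;   // bit within the block
 *
 * and walk the range block by block under the RCU read lock.
 */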
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
/* A page counts as clean unless it is dirty for all three clients.  */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
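
/*
 * Illustrative usage sketch (an addition to this copy, not part of the
 * original header): a caller interested only in the VGA and migration
 * clients builds a mask from the client bits and tests the returned
 * byte the same way:
 *
 *     uint8_t mask = (1 << DIRTY_MEMORY_VGA) | (1 << DIRTY_MEMORY_MIGRATION);
 *     uint8_t clean = cpu_physical_memory_range_includes_clean(start, length,
 *                                                              mask);
 *     if (clean & (1 << DIRTY_MEMORY_VGA)) {
 *         // at least one page in the range is not dirty for VGA
 *     }
 */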
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}
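
/*
 * Added note (not in the original header): the helper below folds a
 * little-endian dirty bitmap (e.g. a hypervisor dirty log) into the
 * per-client bitmaps.  When @start is word-aligned and the host page
 * size equals the target page size (hpratio == 1), whole bitmap words
 * are OR-ed in atomically; otherwise each set bit is expanded to
 * hpratio target pages via cpu_physical_memory_set_dirty_range().
 */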
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
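
/*
 * Added note (not in the original header): the function below moves
 * migration-dirty bits from the global DIRTY_MEMORY_MIGRATION bitmap
 * into the per-RAMBlock bitmap rb->bmap.  If @start and @length are
 * word-aligned, it exchanges whole bitmap words atomically; otherwise
 * it falls back to testing and clearing one page at a time.  It adds
 * the total number of dirty pages seen to *real_dirty_pages and
 * returns the number of pages newly dirtied in rb->bmap.
 */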
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address and length are aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */