/*
 * HAX memory mapping operations
 *
 * Copyright (c) 2015-16 Intel Corporation
 * Copyright 2016 Google, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"

#include "hax-accel-ops.h"
#include "qemu/queue.h"

#define DEBUG_HAX_MEM 0

#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX_MEM) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)
/**
 * HAXMapping: describes a pending guest physical memory mapping
 *
 * @start_pa: a guest physical address marking the start of the region; must be
 *            page-aligned
 * @size: the size of the region in bytes; must be page-aligned
 * @host_va: the host virtual address of the start of the mapping
 * @flags: mapping parameters e.g. HAX_RAM_INFO_ROM or HAX_RAM_INFO_INVALID
 * @entry: additional fields for linking #HAXMapping instances together
 */
typedef struct HAXMapping {
    uint64_t start_pa;
    uint32_t size;
    uint64_t host_va;
    int flags;
    QTAILQ_ENTRY(HAXMapping) entry;
} HAXMapping;
/*
 * A doubly-linked list (actually a tail queue) of the pending page mappings
 * for the ongoing memory transaction.
 *
 * It is used to optimize the number of page mapping updates done through the
 * kernel module. For example, it's effective when a driver is digging an MMIO
 * hole inside an existing memory mapping. It will get a deletion of the whole
 * region, then the addition of the 2 remaining RAM areas around the hole and
 * finally the memory transaction commit. During the commit, it will effectively
 * send to the kernel only the removal of the pages from the MMIO hole after
 * having computed locally the result of the deletion and additions.
 */
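/*
 * As a (hypothetical) illustration: digging a 0x20000-byte MMIO hole at
 * 0xc0000 inside a RAM mapping of [0x0, 0x100000) at host address va arrives
 * as three updates:
 *
 *   hax_update_mapping(0x00000, 0x100000, va,           HAX_RAM_INFO_INVALID);
 *   hax_update_mapping(0x00000, 0xc0000,  va,           0);
 *   hax_update_mapping(0xe0000, 0x20000,  va + 0xe0000, 0);
 *
 * After folding, only one entry is left on the list, so the commit issues a
 * single hax_set_ram() call that invalidates [0xc0000, 0xe0000).
 */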
static QTAILQ_HEAD(, HAXMapping) mappings =
    QTAILQ_HEAD_INITIALIZER(mappings);
/**
 * hax_mapping_dump_list: dumps @mappings to stdout (for debugging)
 */
static void hax_mapping_dump_list(void)
{
    HAXMapping *entry;

    DPRINTF("%s updates:\n", __func__);
    QTAILQ_FOREACH(entry, &mappings, entry) {
        DPRINTF("\t%c 0x%016" PRIx64 "->0x%016" PRIx64 " VA 0x%016" PRIx64
                "%s\n", entry->flags & HAX_RAM_INFO_INVALID ? '-' : '+',
                entry->start_pa, entry->start_pa + entry->size, entry->host_va,
                entry->flags & HAX_RAM_INFO_ROM ? " ROM" : "");
    }
}
static void hax_insert_mapping_before(HAXMapping *next, uint64_t start_pa,
                                      uint32_t size, uint64_t host_va,
                                      uint8_t flags)
{
    HAXMapping *entry;

    entry = g_malloc0(sizeof(*entry));
    entry->start_pa = start_pa;
    entry->size = size;
    entry->host_va = host_va;
    entry->flags = flags;
    if (!next) { /* no successor given: append at the tail */
        QTAILQ_INSERT_TAIL(&mappings, entry, entry);
    } else {
        QTAILQ_INSERT_BEFORE(next, entry, entry);
    }
}
static bool hax_mapping_is_opposite(HAXMapping *entry, uint64_t host_va,
                                    uint8_t flags)
{
    /* removed then added back without change, including the read-only flag */
    bool nop_flags = (entry->flags ^ flags) == HAX_RAM_INFO_INVALID;
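    /*
     * For example (hypothetical values): a pending removal with
     * flags == HAX_RAM_INFO_INVALID followed by an addition with flags == 0
     * XORs to exactly HAX_RAM_INFO_INVALID, so the pair cancels out; the
     * same holds when both sides also carry HAX_RAM_INFO_ROM.
     */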
    return (entry->host_va == host_va) && nop_flags;
}
static void hax_update_mapping(uint64_t start_pa, uint32_t size,
                               uint64_t host_va, uint8_t flags)
{
    uint64_t end_pa = start_pa + size;
    HAXMapping *entry, *next;

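    /*
     * Walk the pending list in guest-physical order. Existing entries that
     * overlap [start_pa, end_pa) are split at the boundaries, so that each
     * overlapping piece is either cancelled (when old and new updates are
     * opposite) or overwritten with the new parameters; any part of the new
     * range not covered by an existing entry is inserted as a fresh one.
     */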
    QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
        uint32_t chunk_sz;

        if (start_pa >= entry->start_pa + entry->size) {
            continue; /* no overlap with this entry, try the next one */
        }
        if (start_pa < entry->start_pa) {
            /* fill the gap before the existing chunk */
            chunk_sz = end_pa <= entry->start_pa ? size
                                                 : entry->start_pa - start_pa;
            hax_insert_mapping_before(entry, start_pa, chunk_sz,
                                      host_va, flags);
            start_pa += chunk_sz;
            host_va += chunk_sz;
            size -= chunk_sz;
        } else if (start_pa > entry->start_pa) {
            /* split the existing chunk at start_pa */
            chunk_sz = start_pa - entry->start_pa;
            hax_insert_mapping_before(entry, entry->start_pa, chunk_sz,
                                      entry->host_va, entry->flags);
            entry->start_pa += chunk_sz;
            entry->host_va += chunk_sz;
            entry->size -= chunk_sz;
        }
        /* now start_pa == entry->start_pa */
        chunk_sz = MIN(size, entry->size);
        if (chunk_sz) {
            bool nop = hax_mapping_is_opposite(entry, host_va, flags);
            bool partial = chunk_sz < entry->size;
            if (partial) {
                /* remove the beginning of the existing chunk */
                entry->start_pa += chunk_sz;
                entry->host_va += chunk_sz;
                entry->size -= chunk_sz;
                if (!nop) { /* add the changed portion */
                    hax_insert_mapping_before(entry, start_pa, chunk_sz,
                                              host_va, flags);
                }
            } else { /* affects the full mapping entry */
                if (nop) { /* no change to this mapping, remove it */
                    QTAILQ_REMOVE(&mappings, entry, entry);
                    g_free(entry);
                } else { /* update mapping properties */
                    entry->host_va = host_va;
                    entry->flags = flags;
                }
            }
            start_pa += chunk_sz;
            host_va += chunk_sz;
            size -= chunk_sz;
        }
        if (!size) { /* we are done */
            break;
        }
    }
    if (size) { /* add the leftover */
        hax_insert_mapping_before(NULL, start_pa, size, host_va, flags);
    }
}
static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
{
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    unsigned int delta;
    uint64_t host_va;
    uint32_t max_mapping_size;

    /* We only care about RAM and ROM regions */
    if (!memory_region_is_ram(mr)) {
        if (memory_region_is_romd(mr)) {
            /* HAXM kernel module does not support ROMD yet */
            warn_report("Ignoring ROMD region 0x%016" PRIx64 "->0x%016" PRIx64,
                        start_pa, start_pa + size);
        }
        return;
    }
    /* Adjust start_pa and size so that they are page-aligned. (Cf
     * kvm_set_phys_mem() in kvm-all.c).
     */
    delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
    delta &= ~qemu_real_host_page_mask();
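    /*
     * E.g. (hypothetical values) with 4 KiB host pages and start_pa ==
     * 0x1234: delta == 0x1000 - 0x234 == 0xdcc, rounding start_pa up to
     * 0x2000 below; if start_pa is already aligned, the second line above
     * folds delta back down to 0.
     */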
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    size &= qemu_real_host_page_mask();
    if (!size || (start_pa & ~qemu_real_host_page_mask())) {
        return;
    }
    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
            + section->offset_within_region + delta;
    if (memory_region_is_rom(section->mr)) {
        flags |= HAX_RAM_INFO_ROM;
    }
    /*
     * The kernel module interface uses 32-bit sizes:
     * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_set_ram
     *
     * If the mapping size is longer than 32 bits, we can't process it in one
     * call into the kernel. Instead, we split the mapping into smaller ones,
     * and call hax_update_mapping() on each.
     */
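    /*
     * E.g. with 4 KiB host pages, max_mapping_size below comes out as
     * 0xfffff000, so a (made-up) 6 GiB RAM region is pushed as one
     * 0xfffff000-byte mapping followed by the ~2 GiB remainder.
     */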
    max_mapping_size = UINT32_MAX & qemu_real_host_page_mask();
    while (size > max_mapping_size) {
        hax_update_mapping(start_pa, max_mapping_size, host_va, flags);
        start_pa += max_mapping_size;
        size -= max_mapping_size;
        host_va += max_mapping_size;
    }
    /* Now size <= max_mapping_size */
    hax_update_mapping(start_pa, (uint32_t)size, host_va, flags);
}
static void hax_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    hax_process_section(section, 0);
}
static void hax_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hax_process_section(section, HAX_RAM_INFO_INVALID);
    memory_region_unref(section->mr);
}
static void hax_transaction_begin(MemoryListener *listener)
{
    g_assert(QTAILQ_EMPTY(&mappings));
}
static void hax_transaction_commit(MemoryListener *listener)
{
    if (!QTAILQ_EMPTY(&mappings)) {
        HAXMapping *entry, *next;

        if (DEBUG_HAX_MEM) {
            hax_mapping_dump_list();
        }
        QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
            if (entry->flags & HAX_RAM_INFO_INVALID) {
                /* for unmapping, put the values expected by the kernel */
                entry->flags = HAX_RAM_INFO_INVALID;
                entry->host_va = 0;
            }
            if (hax_set_ram(entry->start_pa, entry->size,
                            entry->host_va, entry->flags)) {
                fprintf(stderr, "%s: Failed mapping @0x%016" PRIx64 "+0x%"
                        PRIx32 " flags %02x\n", __func__, entry->start_pa,
                        entry->size, entry->flags);
            }
            QTAILQ_REMOVE(&mappings, entry, entry);
            g_free(entry);
        }
    }
}
/* currently we fake the dirty bitmap sync, always dirty */
static void hax_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        /* Skip MMIO regions */
        return;
    }

    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}
static MemoryListener hax_memory_listener = {
    .name = "hax",
    .begin = hax_transaction_begin,
    .commit = hax_transaction_commit,
    .region_add = hax_region_add,
    .region_del = hax_region_del,
    .log_sync = hax_log_sync,
    .priority = 10,
};
static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
                                size_t max_size)
{
    /*
     * We must register each RAM block with the HAXM kernel module, or
     * hax_set_ram() will fail for any mapping into the RAM block:
     * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_alloc_ram
     *
     * Old versions of the HAXM kernel module (< 6.2.0) used to preallocate all
     * host physical pages for the RAM block as part of this registration
     * process, hence the name hax_populate_ram().
     */
    if (hax_populate_ram((uint64_t)(uintptr_t)host, max_size) < 0) {
        fprintf(stderr, "HAX failed to populate RAM\n");
        abort();
    }
}
static struct RAMBlockNotifier hax_ram_notifier = {
    .ram_block_added = hax_ram_block_added,
};
void hax_memory_init(void)
{
    ram_block_notifier_add(&hax_ram_notifier);
    memory_listener_register(&hax_memory_listener, &address_space_memory);
}