/*
 * QEMU Hyper-V Dynamic Memory Protocol driver
 *
 * Copyright (C) 2020-2023 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hv-balloon-internal.h"
#include "hv-balloon-our_range_memslots.h"
#include "trace.h"
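
/*
 * This module tracks the guest address range that the hv-balloon device
 * hot-adds memory into ("our range"): how much of it is in use, which
 * pages were removed again, and the per-memslot alias regions through
 * which the backing memory is mapped into the guest piecewise.
 */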

/* OurRange */
static void our_range_init(OurRange *our_range, uint64_t start, uint64_t count)
{
    assert(count <= UINT64_MAX - start);
    our_range->range.start = start;
    our_range->range.count = count;

    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);

    /* mark the whole range as unused, but available for potential use */
    our_range->added = 0;
    our_range->unusable_tail = 0;
}

static void our_range_destroy(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
}
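
/*
 * Drop all entries from both removed-page trees by destroying and then
 * re-initializing them.
 */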
void hvb_our_range_clear_removed_trees(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);
}
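
/*
 * Record that additional_size more pages at the front of the range are now
 * in use; the asserts catch overflow and the used part (plus the unusable
 * tail) growing past the end of the range.
 */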
void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size)
{
    assert(additional_size <= UINT64_MAX - our_range->added);

    our_range->added += additional_size;

    assert(our_range->added <= UINT64_MAX - our_range->unusable_tail);
    assert(our_range->added + our_range->unusable_tail <=
           our_range->range.count);
}

/* OurRangeMemslots */
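/*
 * Carve the backing memory region into memslot-sized alias regions.  The
 * aliases are only created here, not mapped; mapping happens on demand in
 * hvb_our_range_memslots_ensure_mapped_additional().
 */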
static void our_range_memslots_init_slots(OurRangeMemslots *our_range,
                                          MemoryRegion *backing_mr,
                                          Object *memslot_owner)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t memslot_offset;

    assert(memslots->count > 0);
    memslots->slots = g_new0(MemoryRegion, memslots->count);

    /* Initialize our memslots, but don't map them yet. */
    assert(memslots->size_each > 0);
    for (idx = 0, memslot_offset = 0; idx < memslots->count;
         idx++, memslot_offset += memslots->size_each) {
        uint64_t memslot_size;
        g_autofree char *name = NULL;

        /* The size of the last memslot might be smaller. */
        if (idx == memslots->count - 1) {
            uint64_t region_size;

            assert(our_range->mr);
            region_size = memory_region_size(our_range->mr);
            memslot_size = region_size - memslot_offset;
        } else {
            memslot_size = memslots->size_each;
        }

        name = g_strdup_printf("memslot-%u", idx);
        memory_region_init_alias(&memslots->slots[idx], memslot_owner, name,
                                 backing_mr, memslot_offset, memslot_size);
        /*
         * We want to be able to atomically and efficiently activate/deactivate
         * individual memslots without affecting adjacent memslots in memory
         * notifiers.
         */
        memory_region_set_unmergeable(&memslots->slots[idx], true);
    }

    memslots->mapped_count = 0;
}
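
/*
 * Allocate an OurRangeMemslots covering parent_mr.  addr and the region
 * size are converted from bytes into HV_BALLOON_PAGE_SIZE units for the
 * OurRange page-level bookkeeping.
 */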
OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size)
{
    OurRangeMemslots *our_range;

    our_range = g_malloc(sizeof(*our_range));
    our_range_init(&our_range->range,
                   addr / HV_BALLOON_PAGE_SIZE,
                   memory_region_size(parent_mr) / HV_BALLOON_PAGE_SIZE);
    our_range->slots.size_each = memslot_size;
    our_range->slots.count = memslot_count;
    our_range->mr = parent_mr;
    our_range_memslots_init_slots(our_range, backing_mr, memslot_owner);

    return our_range;
}
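
/*
 * Unmap all currently mapped memslots in a single memory transaction, then
 * unparent and free every slot, mapped or not.
 */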
static void our_range_memslots_free_memslots(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t offset;

    memory_region_transaction_begin();
    for (idx = 0, offset = 0; idx < memslots->mapped_count;
         idx++, offset += memslots->size_each) {
        trace_hv_balloon_unmap_slot(idx, memslots->count, offset);
        assert(memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_del_subregion(our_range->mr, &memslots->slots[idx]);
    }
    memory_region_transaction_commit();

    for (idx = 0; idx < memslots->count; idx++) {
        object_unparent(OBJECT(&memslots->slots[idx]));
    }

    g_clear_pointer(&our_range->slots.slots, g_free);
}
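
/*
 * Free the whole structure, discarding the backing RAM block first so its
 * pages are released back to the host before the aliases are torn down.
 */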
void hvb_our_range_memslots_free(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    MemoryRegion *hostmem_mr;
    RAMBlock *rb;

    assert(our_range->slots.count > 0);
    assert(our_range->slots.slots);

    hostmem_mr = memslots->slots[0].alias;
    rb = hostmem_mr->ram_block;
    ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));

    our_range_memslots_free_memslots(our_range);
    our_range_destroy(&our_range->range);
    g_free(our_range);
}
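
/*
 * Map enough memslots to cover the first range.added + additional_map_size
 * pages of the range.  Slots below mapped_count are already mapped and are
 * skipped; the rest are added until total_map_size is covered.
 */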
void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    uint64_t total_map_size;
    unsigned int idx;
    uint64_t offset;

    total_map_size = (our_range->range.added + additional_map_size) *
        HV_BALLOON_PAGE_SIZE;
    idx = memslots->mapped_count;
    assert(memslots->size_each > 0);
    offset = idx * memslots->size_each;

    /*
     * Activate all memslots covered by the newly added region in a single
     * transaction.
     */
    memory_region_transaction_begin();
    for ( ; idx < memslots->count;
         idx++, offset += memslots->size_each) {
        /*
         * If this memslot starts at or beyond the end of the range to map,
         * so does every subsequent one.
         */
        if (offset >= total_map_size) {
            break;
        }

        /*
         * Instead of enabling/disabling memslots, we add/remove them. This
         * should make address space updates faster, because we don't have to
         * loop over many disabled subregions.
         */
        trace_hv_balloon_map_slot(idx, memslots->count, offset);
        assert(!memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_add_subregion(our_range->mr, offset,
                                    &memslots->slots[idx]);

        memslots->mapped_count++;
    }
    memory_region_transaction_commit();
}
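
/*
 * Illustrative call sequence (a sketch only; the real caller is the
 * hv-balloon device model and the variable names below are made up):
 *
 *   OurRangeMemslots *r;
 *
 *   r = hvb_our_range_memslots_new(addr, parent_mr, backing_mr, owner,
 *                                  slot_count, slot_size);
 *   hvb_our_range_memslots_ensure_mapped_additional(r, nr_pages);
 *   hvb_our_range_mark_added(&r->range, nr_pages);
 *   ...
 *   hvb_our_range_memslots_free(r);
 */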