/* SPDX-License-Identifier: GPL-2.0-only */

#include <bootmem.h>
#include <commonlib/coreboot_tables.h>
#include <device/device.h>
#include <memrange.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <tests/test.h>

/* Stubs defined to satisfy linker dependencies */
void cbmem_add_bootmem(void)
{
}

void bootmem_arch_add_ranges(void)
{
}

struct bootmem_ranges_t {
	uint64_t start;
	uint64_t size;
	uint32_t type;
};

/* Define symbols for the regions required by bootmem and constants for regions that
   do not need to exist in the test executable. No backing memory is needed for the
   regions; only start, end and size symbols are required. Only the values actually
   used by the tests are defined. */
#define ZERO_REGION_START ((uintptr_t)0x0)
#define ZERO_REGION_SIZE ((uintptr_t)0x10000)

TEST_REGION_UNALLOCATED(program, 0x10000000, 0x40000);
#define PROGRAM_START ((uintptr_t)_program)
#define PROGRAM_SIZE REGION_SIZE(program)

#define CACHEABLE_START ((uintptr_t)0x10000000ULL)
#define CACHEABLE_SIZE ((uintptr_t)0x100000000ULL)
#define CACHEABLE_END ((uintptr_t)(CACHEABLE_START + CACHEABLE_SIZE))

/* Stack region end address is hardcoded because `<const> - <symbol>` does not work in GCC */
TEST_REGION_UNALLOCATED(stack, 0x10040000, 0x1000);
#define STACK_START ((uintptr_t)_stack)
#define STACK_SIZE REGION_SIZE(stack)
#define STACK_END ((uintptr_t)(0x10040000 + 0x1000))

#define RESERVED_START ((uintptr_t)0x100000000ULL)
#define RESERVED_SIZE ((uintptr_t)0x100000)
#define RESERVED_END ((uintptr_t)(RESERVED_START + RESERVED_SIZE))

TEST_REGION_UNALLOCATED(ramstage, 0x10000000, 0x41000);
#define RAMSTAGE_START ((uintptr_t)_ramstage)
#define RAMSTAGE_SIZE REGION_SIZE(ramstage)

#define CACHEABLE_START_TO_RESERVED_START_SIZE (RESERVED_START - CACHEABLE_START)
#define RESERVED_END_TO_CACHEABLE_END_SIZE (CACHEABLE_END - RESERVED_END)
#define STACK_END_TO_RESERVED_START_SIZE (RESERVED_START - STACK_END)

/* Bootmem layout for tests
 *
 * Regions marked with asterisks (***) are not visible to the OS
 *
 * +------------------ZERO-----------------+ <-0x0
 * |                                       |
 * +---------------------------------------+ <-0x10000
 *
 * +-------+----CACHEABLE_MEMORY---------+-+ <-0x10000000
 * |       |        ***PROGRAM***        | |
 * |       +-----------------------------+ | <-0x10040000
 * |       |         ***STACK***         | |
 * |       +-----------------------------+ | <-0x10041000
 * |                                       |
 * |                                       |
 * |                                       |
 * |       +-------RESERVED_MEMORY-------+ | <-0x100000000
 * |       |                             | |
 * |       |                             | |
 * |       |                             | |
 * |       +-----------------------------+ | <-0x100100000
 * |                                       |
 * |                                       |
 * +---------------------------------------+ <-0x110000000
 *
 * Ramstage covers PROGRAM and STACK regions.
 */
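
/* With this layout the OS-visible map (bootmem_walk_os_mem()) is expected to hold
 * 4 entries: ZERO, RAM up to RESERVED, RESERVED and RAM up to the end of CACHEABLE.
 * The full bootmem map (bootmem_walk()) additionally carves the ramstage region out
 * of the head of the cacheable RAM block, giving 5 entries. The tests below assert
 * exactly these counts.
 */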

struct bootmem_ranges_t os_ranges_mock[] = {
	[0] = { .start = ZERO_REGION_START, .size = ZERO_REGION_SIZE,
		.type = BM_MEM_RAM },
	[1] = { .start = CACHEABLE_START, .size = CACHEABLE_START_TO_RESERVED_START_SIZE,
		.type = BM_MEM_RAM },
	[2] = { .start = RESERVED_START, .size = RESERVED_SIZE,
		.type = BM_MEM_RESERVED },
	[3] = { .start = RESERVED_END, .size = RESERVED_END_TO_CACHEABLE_END_SIZE,
		.type = BM_MEM_RAM },
};

struct bootmem_ranges_t ranges_mock[] = {
	[0] = { .start = ZERO_REGION_START, .size = ZERO_REGION_SIZE,
		.type = BM_MEM_RAM },
	[1] = { .start = RAMSTAGE_START, .size = RAMSTAGE_SIZE,
		.type = BM_MEM_RAMSTAGE },
	[2] = { .start = STACK_END, .size = STACK_END_TO_RESERVED_START_SIZE,
		.type = BM_MEM_RAM },
	[3] = { .start = RESERVED_START, .size = RESERVED_SIZE,
		.type = BM_MEM_RESERVED },
	[4] = { .start = RESERVED_END, .size = RESERVED_END_TO_CACHEABLE_END_SIZE,
		.type = BM_MEM_RAM },
};

struct bootmem_ranges_t *os_ranges = os_ranges_mock;
struct bootmem_ranges_t *ranges = ranges_mock;

/* Note that the reserved resource overlaps the cacheable resource */
struct resource res_mock[] = {
	{ .base = ZERO_REGION_START, .size = ZERO_REGION_SIZE, .next = &res_mock[1],
	  .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM | IORESOURCE_ASSIGNED },
	{ .base = CACHEABLE_START, .size = CACHEABLE_SIZE, .next = &res_mock[2],
	  .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM | IORESOURCE_ASSIGNED },
	{ .base = RESERVED_START, .size = RESERVED_SIZE, .next = NULL,
	  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM | IORESOURCE_ASSIGNED }
};

/* Device simulating RAM */
struct device mem_device_mock = {
	.enabled = 1,
	.resource_list = res_mock,
	.next = NULL
};
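
/* bootmem builds its memory maps from the resources of the devices on the
 * `all_devices` list, so this single mock device provides all of the cacheable
 * RAM and reserved resources seen by the tests. */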
struct device *all_devices = &mem_device_mock;

/* Simplified version for the purpose of tests */
static uint32_t bootmem_to_lb_tag(const enum bootmem_type tag)
{
	switch (tag) {
	case BM_MEM_RAM:
		return LB_MEM_RAM;
	case BM_MEM_RESERVED:
		return LB_MEM_RESERVED;
	default:
		return LB_MEM_RESERVED;
	}
}

static void test_bootmem_write_mem_table(void **state)
{
	/* Space for 10 lb_mem entries to be safe */
	const size_t lb_mem_max_size = sizeof(struct lb_memory)
				       + 10 * sizeof(struct lb_memory_range);
	const size_t expected_allocation_size =
		(sizeof(struct lb_memory)
		 + ARRAY_SIZE(os_ranges_mock) * sizeof(struct lb_memory_range));
	const size_t required_unused_space_size = lb_mem_max_size - expected_allocation_size;
	int i;
	struct lb_memory *lb_mem;
	/* Declare a buffer and fill it with a sentinel value; it is used to verify
	   how much space bootmem_write_memory_table() actually used */
	u8 sentinel_value_buffer[required_unused_space_size];
	memset(sentinel_value_buffer, 0x77, required_unused_space_size);

	lb_mem = malloc(lb_mem_max_size);
	lb_mem->tag = LB_TAG_MEMORY;
	lb_mem->size = sizeof(*lb_mem);
	/* Fill rest of buffer with sentinel value */
	memset(((u8 *)lb_mem) + expected_allocation_size, 0x77, required_unused_space_size);

	bootmem_write_memory_table(lb_mem);

	/* There should be only `os_ranges_mock` entries visible in coreboot table */
	assert_int_equal(lb_mem->size, sizeof(*lb_mem) +
			 ARRAY_SIZE(os_ranges_mock) * sizeof(struct lb_memory_range));
	assert_memory_equal(sentinel_value_buffer,
			    ((u8 *)lb_mem) + expected_allocation_size,
			    required_unused_space_size);

	for (i = 0; i < lb_mem->size / sizeof(struct lb_memory_range); i++) {
		assert_int_equal(lb_mem->map[i].start, os_ranges[i].start);
		assert_int_equal(lb_mem->map[i].size, os_ranges[i].size);
		assert_int_equal(lb_mem->map[i].type, bootmem_to_lb_tag(os_ranges[i].type));
	}

	free(lb_mem);
}

int os_bootmem_walk_cnt;
int bootmem_walk_cnt;

static bool verify_os_bootmem_walk(const struct range_entry *r, void *arg)
{
	assert_int_equal(range_entry_base(r), os_ranges[os_bootmem_walk_cnt].start);
	assert_int_equal(range_entry_size(r), os_ranges[os_bootmem_walk_cnt].size);
	assert_int_equal(range_entry_tag(r), os_ranges[os_bootmem_walk_cnt].type);

	os_bootmem_walk_cnt++;

	return true;
}

static bool verify_bootmem_walk(const struct range_entry *r, void *arg)
{
	assert_int_equal(range_entry_base(r), ranges[bootmem_walk_cnt].start);
	assert_int_equal(range_entry_size(r), ranges[bootmem_walk_cnt].size);
	assert_int_equal(range_entry_tag(r), ranges[bootmem_walk_cnt].type);

	bootmem_walk_cnt++;

	return true;
}

static bool count_entries_os_bootmem_walk(const struct range_entry *r, void *arg)
{
	os_bootmem_walk_cnt++;

	return true;
}

static bool count_entries_bootmem_walk(const struct range_entry *r, void *arg)
{
	bootmem_walk_cnt++;

	return true;
}

/* This function initializes bootmem using bootmem_write_memory_table().
   bootmem_init() is not accessible directly because it is static. */
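/* bootmem stores its state in static memranges structures, so the ranges set up
   here are the ones that the later bootmem_walk()/bootmem_add_range() calls in
   each test operate on. */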
static void init_memory_table_library(void)
{
	struct lb_memory *lb_mem;

	/* Allocate space for 10 lb_mem entries to be safe */
	lb_mem = malloc(sizeof(*lb_mem) + 10 * sizeof(struct lb_memory_range));
	lb_mem->tag = LB_TAG_MEMORY;
	lb_mem->size = sizeof(*lb_mem);

	/* We need to call this only to initialize the library */
	bootmem_write_memory_table(lb_mem);

	free(lb_mem);
}

static void test_bootmem_add_range(void **state)
{
	init_memory_table_library();

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 5);
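
	/*
	 * The OS-visible memory table has already been written out above, and
	 * BM_MEM_ACPI is an OS-visible type, so bootmem_add_range() is expected
	 * to trip an assertion here. The range should still land in the internal
	 * bootmem map, which is why the walk below sees 6 entries while the
	 * OS-visible walk still sees 4.
	 */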
	expect_assert_failure(
		bootmem_add_range(ALIGN_UP(PROGRAM_START, 4096),
				  ALIGN_DOWN(PROGRAM_SIZE / 2, 4096),
				  BM_MEM_ACPI)
	);

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 6);

	/* No assert failure expected, as BM_MEM_RAMSTAGE should not be added to os_bootmem */
	bootmem_add_range(ALIGN_UP(STACK_END + 4096, 4096),
			  ALIGN_DOWN(STACK_END_TO_RESERVED_START_SIZE / 2, 4096),
			  BM_MEM_RAMSTAGE);

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(count_entries_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	/* Two entries are added because the added range lands in the middle of another */
	bootmem_walk_cnt = 0;
	bootmem_walk(count_entries_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 8);
}

static void test_bootmem_walk(void **state)
{
	init_memory_table_library();

	os_bootmem_walk_cnt = 0;
	bootmem_walk_os_mem(verify_os_bootmem_walk, NULL);
	assert_int_equal(os_bootmem_walk_cnt, 4);

	bootmem_walk_cnt = 0;
	bootmem_walk(verify_bootmem_walk, NULL);
	assert_int_equal(bootmem_walk_cnt, 5);
}

static void test_bootmem_region_targets_type(void **state)
{
	int ret;
	u64 subregion_start;
	u64 subregion_size;

	init_memory_table_library();

	/* Single whole region */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 1);

	/* Expect failure because of an incorrect bootmem_type */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE, BM_MEM_RESERVED);
	assert_int_equal(ret, 0);

	/* Range covering one more byte than one region */
	ret = bootmem_region_targets_type(RAMSTAGE_START, RAMSTAGE_SIZE + 1, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 0);

	/* Expect success for a subregion of ramstage stretching from a point in the
	   program range to a point in the stack range. */
	subregion_start = PROGRAM_START + PROGRAM_SIZE / 4;
	subregion_size = STACK_END - STACK_SIZE / 4 - subregion_start;
	ret = bootmem_region_targets_type(subregion_start, subregion_size, BM_MEM_RAMSTAGE);
	assert_int_equal(ret, 1);

	/* Expect failure for a range covering more than one tag, as there is
	   no BM_MEM_CACHEABLE */
	subregion_start = STACK_START + STACK_SIZE / 2;
	subregion_size = RESERVED_START + RESERVED_SIZE / 4 * 3 - subregion_start;
	ret = bootmem_region_targets_type(subregion_start, subregion_size, BM_MEM_RAM);
	assert_int_equal(ret, 0);

	/* Middle of range should not fail */
	ret = bootmem_region_targets_type(RESERVED_START + RESERVED_SIZE / 4,
					  RESERVED_SIZE / 2, BM_MEM_RESERVED);
	assert_int_equal(ret, 1);

	/* Subsection of range bordering end edge */
	ret = bootmem_region_targets_type(RESERVED_END + RESERVED_END_TO_CACHEABLE_END_SIZE / 2,
					  RESERVED_END_TO_CACHEABLE_END_SIZE / 2, BM_MEM_RAM);
	assert_int_equal(ret, 1);

	/* Region touching zero */
	ret = bootmem_region_targets_type(ZERO_REGION_START, ZERO_REGION_SIZE, BM_MEM_RAM);
	assert_int_equal(ret, 1);

	/* Expect failure when passing zero as size. */
	ret = bootmem_region_targets_type(ZERO_REGION_START, 0, BM_MEM_RAM);
	assert_int_equal(ret, 0);
	ret = bootmem_region_targets_type(RESERVED_START, 0, BM_MEM_RESERVED);
	assert_int_equal(ret, 0);
}

/* Action function used to check alignment of size and base of allocated ranges */
static bool verify_bootmem_allocate_buffer(const struct range_entry *r, void *arg)
{
	if (range_entry_tag(r) == BM_MEM_PAYLOAD) {
		assert_true(IS_ALIGNED(range_entry_base(r), 4096));
		assert_true(IS_ALIGNED(range_entry_size(r), 4096));
	}

	return true;
}

static void test_bootmem_allocate_buffer(void **state)
{
	void *buf;
	void *prev;

	init_memory_table_library();

	/* All allocated buffers should be below the 32-bit boundary */
	buf = bootmem_allocate_buffer(1ULL << 32);
	assert_null(buf);

	/* Try a size too big for our BM_MEM_RAM range below the 32-bit boundary */
	buf = bootmem_allocate_buffer(RESERVED_START - PROGRAM_START);
	assert_null(buf);

	/* Two working cases */
	buf = bootmem_allocate_buffer(0xE0000000);
	assert_non_null(buf);
	assert_int_equal(1, bootmem_region_targets_type((uintptr_t)buf,
							0xE0000000, BM_MEM_PAYLOAD));
	assert_in_range((uintptr_t)buf, CACHEABLE_START + RAMSTAGE_SIZE, RESERVED_START);
	/* Check that allocated (payload) ranges have their base and size aligned */
	bootmem_walk(verify_bootmem_allocate_buffer, NULL);

	prev = buf;
	buf = bootmem_allocate_buffer(0xF000000);
	assert_non_null(buf);
	assert_int_equal(1, bootmem_region_targets_type((uintptr_t)buf,
							0xF000000, BM_MEM_PAYLOAD));
	assert_in_range((uintptr_t)buf, CACHEABLE_START + RAMSTAGE_SIZE, RESERVED_START);
	/* Check that the new buffer does not overlap the previously allocated range */
	assert_not_in_range((uintptr_t)buf, (uintptr_t)prev, (uintptr_t)prev + 0xE0000000);
	/* Check that allocated (payload) ranges have their base and size aligned */
	bootmem_walk(verify_bootmem_allocate_buffer, NULL);
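
	/*
	 * The RAM block between the stack and the reserved region is 0xEFFBF000
	 * bytes. After the 0xE0000000 and 0xF000000 allocations fewer than
	 * 0x1000000 bytes are left in any RAM range, so the next allocation is
	 * expected to fail.
	 */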
	/* Run out of memory for new allocations */
	buf = bootmem_allocate_buffer(0x1000000);
	assert_null(buf);
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_bootmem_write_mem_table),
		cmocka_unit_test(test_bootmem_add_range),
		cmocka_unit_test(test_bootmem_walk),
		cmocka_unit_test(test_bootmem_allocate_buffer),
		cmocka_unit_test(test_bootmem_region_targets_type)
	};

	return cb_run_group_tests(tests, NULL, NULL);
}