/* SPDX-License-Identifier: GPL-2.0-only */

#include <tests/test.h>

#include <device/device.h>
#include <commonlib/helpers.h>
#include <memrange.h>

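/* Expected alignment of memrange entries: bases are aligned down and ends up to 4KiB (2^12). */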
#define MEMRANGE_ALIGN (POWER_OF_2(12))

enum mem_types {
        /* Avoid using 0 to verify that the UUT really sets this memory,
           but keep the value small, as it will be used as an index into the resource table. */
        CACHEABLE_TAG = 0x10,
        RESERVED_TAG,
        READONLY_TAG,
        INSERTED_TAG,
        HOLE_TAG,
        UNASSIGNED_TAG,
        END_OF_RESOURCES
};

/* The indices of the entries matter, since they must reflect the mem_types enum */
struct resource res_mock_1[] = {
        [UNASSIGNED_TAG] = {.base = 0x0,
                            .size = 0x8000,
                            .next = &res_mock_1[CACHEABLE_TAG],
                            .flags = IORESOURCE_MEM | IORESOURCE_PREFETCH},
        [CACHEABLE_TAG] = {.base = 0xE000,
                           .size = 0xF2000,
                           .next = &res_mock_1[RESERVED_TAG],
                           .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
                                    IORESOURCE_ASSIGNED},
        [RESERVED_TAG] = {.base = 4ULL * GiB,
                          .size = 4ULL * KiB,
                          .next = &res_mock_1[READONLY_TAG],
                          .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
                                   IORESOURCE_ASSIGNED},
        [READONLY_TAG] = {.base = 0xFF0000,
                          .size = 0x10000,
                          .next = NULL,
                          .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
                                   IORESOURCE_ASSIGNED}
};

/* Boundaries 1 byte below 4GiB and 1 byte above 4GiB. */
struct resource res_mock_2[] = {
        [CACHEABLE_TAG] = {.base = 0x1000000,
                           .size = 4ULL * GiB - 0x1000001ULL,
                           .next = &res_mock_2[RESERVED_TAG],
                           .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
                                    IORESOURCE_ASSIGNED},
        [RESERVED_TAG] = {.base = 4ULL * GiB + 1ULL,
                          .size = 4ULL * GiB,
                          .next = &res_mock_2[READONLY_TAG],
                          .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
                                   IORESOURCE_ASSIGNED},
        [READONLY_TAG] = {.base = 0,
                          .size = 0x10000,
                          .next = NULL,
                          .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
                                   IORESOURCE_ASSIGNED}
};

/* Range crossing the 4GiB boundary. */
struct resource res_mock_3[] = {
        [CACHEABLE_TAG] = {.base = 0xD000,
                           .size = 0xF3000,
                           .next = &res_mock_3[RESERVED_TAG],
                           .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
                                    IORESOURCE_ASSIGNED},
        [RESERVED_TAG] = {.base = 1ULL * GiB,
                          .size = 4ULL * GiB,
                          .next = &res_mock_3[READONLY_TAG],
                          .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
                                   IORESOURCE_ASSIGNED},
        [READONLY_TAG] = {.base = 0xFF0000,
                          .size = 0x10000,
                          .next = NULL,
                          .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
                                   IORESOURCE_ASSIGNED}
};

struct device mock_device = {.enabled = 1};

/* Fake memory device handle */
struct device *all_devices = &mock_device;
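
/* The memrange library collects resources by walking this global device list; each of the
   setup_test_*() fixtures below points the mock device at one of the resource chains above. */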
int setup_test_1(void **state)
{
        *state = res_mock_1;
        mock_device.resource_list = &res_mock_1[UNASSIGNED_TAG];

        return 0;
}

int setup_test_2(void **state)
{
        *state = res_mock_2;
        mock_device.resource_list = &res_mock_2[CACHEABLE_TAG];

        return 0;
}

int setup_test_3(void **state)
{
        *state = res_mock_3;
        mock_device.resource_list = &res_mock_3[CACHEABLE_TAG];

        return 0;
}

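/* Helpers computing the aligned base and end that a memrange entry created from the given
   mock resource is expected to report. */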
resource_t get_aligned_base(struct resource *res, struct range_entry *entry)
{
        return ALIGN_DOWN(res[range_entry_tag(entry)].base, MEMRANGE_ALIGN);
}

resource_t get_aligned_end(struct resource *res, struct range_entry *entry)
{
        resource_t end = res[range_entry_tag(entry)].base + res[range_entry_tag(entry)].size
                         + (res[range_entry_tag(entry)].base - range_entry_base(entry));
        return ALIGN_UP(end, MEMRANGE_ALIGN);
}

/*
 * This test verifies the memranges_init(), memranges_add_resources() and memranges_teardown()
 * functions. It covers the basic functionality of the memrange library - creating a memrange
 * structure from the resources available on the platform and freeing the allocated memory.
 *
 * Example memory ranges (res_mock1) for test_memrange_basic.
 * Ranges marked with asterisks (***) are not added to the test_memrange.
 *
 *     +-------UNASSIGNED_TAG--------+ <-0x0
 *     |                             |
 *     +-----------------------------+ <-0x8000
 *
 *
 *     +--------CACHEABLE_TAG--------+ <-0xE000
 *     |                             |
 *     |                             |
 *     |                             |
 *     +-----------------------------+ <-0x100000
 *
 *
 *     +-----***READONLY_TAG***------+ <-0xFF0000
 *     |                             |
 *     |                             |
 *     |                             |
 *     +-----------------------------+ <-0x1000000
 *
 *
 *     +--------RESERVED_TAG---------+ <-0x100000000
 *     |                             |
 *     +-----------------------------+ <-0x100001000
 */
static void test_memrange_basic(void **state)
{
        int counter = 0;
        const unsigned long cacheable = IORESOURCE_CACHEABLE;
        const unsigned long reserved = IORESOURCE_RESERVE;
        const unsigned long prefetchable = IORESOURCE_PREFETCH;
        struct range_entry *ptr;
        struct memranges test_memrange;
        struct resource *res_mock = *state;
        resource_t prev_base = 0;

        memranges_init_empty(&test_memrange, NULL, 0);
        memranges_add_resources(&test_memrange, prefetchable, prefetchable, UNASSIGNED_TAG);
        memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

        /* There should be two entries, since the cacheable and reserved regions are not
           neighbors. Besides these two, a region with an unassigned tag is defined, to emulate
           an unmapped PCI BAR resource. This resource is not mapped into the host physical
           address space and hence should not be picked up by memranges_add_resources(). */

        memranges_each_entry(ptr, &test_memrange)
        {
                assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, RESERVED_TAG);
                assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

                assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

                /* Ranges have to be returned in increasing order */
                assert_true(prev_base <= range_entry_base(ptr));

                prev_base = range_entry_base(ptr);
                counter++;
        }
        assert_int_equal(counter, 2);
        counter = 0;

        /* Remove initial memrange */
        memranges_teardown(&test_memrange);
        memranges_each_entry(ptr, &test_memrange) counter++;
        assert_int_equal(counter, 0);
}

/*
 * This test verifies the memranges_clone(), memranges_insert() and memranges_update_tag()
 * functions. All operations are performed on a cloned memrange. One of the most important
 * things to check is that memranges_insert() removes all ranges which are covered by the
 * newly inserted one.
 *
 * Example memory ranges (res_mock1) for test_memrange_clone_insert.
 * Ranges marked with asterisks (***) are not added to the clone_memrange.
 * Ranges marked with (^) have their tag value changed during the test.
 *
 *          +--------CACHEABLE_TAG--------+ <-0xE000
 *   +------|----INSERTED_TAG----------+  | <-0xF000
 *   |      |  (^READONLY_TAG^)        |  |
 *   |      |                          |  |
 *   |      +-----------------------------+ <-0x100000
 *   +---------------------------------+    <-0x101000
 *
 *
 *          +-----***READONLY_TAG***------+ <-0xFF0000
 *          |                             |
 *          |                             |
 *          |                             |
 *          +-----------------------------+ <-0x1000000
 *
 *
 *   +------+---------RESERVED_TAG-----+--+ <-0x100000000
 *   |      |                          |  |
 *   |      +-----------------------------+ <-0x100001000
 *   +-----------INSERTED_TAG----------+    <-0x100002000
 */
static void test_memrange_clone_insert(void **state)
{
        int counter = 0;
        const unsigned long cacheable = IORESOURCE_CACHEABLE;
        const unsigned long reserved = IORESOURCE_RESERVE;
        struct range_entry *ptr;
        struct memranges test_memrange, clone_memrange;
        struct resource *res_mock = *state;
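        /* Offset of one MEMRANGE_ALIGN page (4 KiB) used to shift the inserted range. */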
        const resource_t new_range_begin_offset = 1ULL << 12;

        memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

        memranges_clone(&clone_memrange, &test_memrange);
        memranges_teardown(&test_memrange);

        /* Verify that the new one is really a clone */
        memranges_each_entry(ptr, &clone_memrange)
        {
                assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, END_OF_RESOURCES - 1);
                assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

                assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

                counter++;
        }
        assert_int_equal(counter, 2);
        counter = 0;

        /* Insert a new range, which will overlap with the first region. */
        memranges_insert(&clone_memrange, res_mock[CACHEABLE_TAG].base + new_range_begin_offset,
                         res_mock[CACHEABLE_TAG].size, INSERTED_TAG);

        /* Three ranges should be there - CACHEABLE (shrunk), INSERTED and RESERVED */
        memranges_each_entry(ptr, &clone_memrange)
        {
                resource_t expected_end;

                if (range_entry_tag(ptr) == CACHEABLE_TAG) {
                        assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);

                        expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset;
                        assert_int_equal(range_entry_end(ptr), expected_end);
                }
                if (range_entry_tag(ptr) == INSERTED_TAG) {
                        assert_int_equal(range_entry_base(ptr),
                                         res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

                        expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
                                       + res_mock[CACHEABLE_TAG].size;
                        assert_int_equal(range_entry_end(ptr),
                                         ALIGN_UP(expected_end, MEMRANGE_ALIGN));
                }
                counter++;
        }
        assert_int_equal(counter, 3);
        counter = 0;

        /* Update the tag of the inserted region, so that it now shadows the readonly range.
         * Additionally, this verifies the API for updating tags. */
        memranges_update_tag(&clone_memrange, INSERTED_TAG, READONLY_TAG);

        memranges_each_entry(ptr, &clone_memrange)
        {
                resource_t expected_end;

                assert_int_not_equal(range_entry_tag(ptr), INSERTED_TAG);
                if (range_entry_tag(ptr) == READONLY_TAG) {
                        assert_int_equal(range_entry_base(ptr),
                                         res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

                        expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
                                       + res_mock[CACHEABLE_TAG].size;
                        assert_int_equal(range_entry_end(ptr),
                                         ALIGN_UP(expected_end, MEMRANGE_ALIGN));
                }
        }

        /* Check if alignment (4KiB) is properly applied, that is begin - DOWN and end - UP */
        memranges_insert(&clone_memrange, res_mock[RESERVED_TAG].base + 0xAD,
                         res_mock[RESERVED_TAG].size, INSERTED_TAG);

        memranges_each_entry(ptr, &clone_memrange)
        {
                resource_t expected_end;

                assert_int_not_equal(range_entry_tag(ptr), RESERVED_TAG);
                if (range_entry_tag(ptr) == INSERTED_TAG) {
                        assert_int_equal(
                                range_entry_base(ptr),
                                ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN));

                        expected_end = ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN)
                                       + new_range_begin_offset + res_mock[RESERVED_TAG].size;
                        expected_end = ALIGN_UP(expected_end, MEMRANGE_ALIGN);

                        assert_int_equal(range_entry_end(ptr), expected_end);
                }
                counter++;
        }
        assert_int_equal(counter, 3);

        /* Free clone */
        memranges_teardown(&clone_memrange);
}

/*
 * This test verifies memranges_fill_holes_up_to() and memranges_create_hole(). The idea of
 * the test is to fill all holes, so that we end up with a contiguous address space fully
 * covered by entries. Then, holes are created on the border of two different regions.
 *
 * Example memory ranges (res_mock1) for test_memrange_holes.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 *   +--------CACHEABLE_TAG--------+ <-0xE000
 *   |                             |
 *   |                             |
 * //|/////////////////////////////| <-0xFF000
 * //+-----------HOLE_TAG----------+ <-0x100000
 * //|/////////////////////////////| <-0x101000
 *   |                             |
 *   |                             |
 *   |                             |
 *   |                             |
 *   +--------RESERVED_TAG---------+ <-0x100000000
 *   |                             |
 *   +-----------------------------+ <-0x100001000
 */
static void test_memrange_holes(void **state)
{
        int counter = 0;
        const unsigned long cacheable = IORESOURCE_CACHEABLE;
        const unsigned long reserved = IORESOURCE_RESERVE;
        struct range_entry *ptr;
        struct range_entry *hole_ptr = NULL;
        struct memranges test_memrange;
        struct resource *res_mock = *state;
        int holes_found = 0;
        resource_t last_range_end = 0;
        const resource_t holes_fill_end = res_mock[RESERVED_TAG].base;

        memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

        /* Count holes in ranges */
        memranges_each_entry(ptr, &test_memrange)
        {
                if (!last_range_end) {
                        last_range_end = range_entry_end(ptr);
                        continue;
                }

                if (range_entry_base(ptr) != last_range_end) {
                        holes_found++;
                        last_range_end = range_entry_end(ptr);
                }

                if (range_entry_base(ptr) >= holes_fill_end)
                        break;
        }

        /* Create range entries which cover a contiguous memory range
           (but with different tags) */
        memranges_fill_holes_up_to(&test_memrange, holes_fill_end, HOLE_TAG);

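        /* Every gap below holes_fill_end should now be covered by a HOLE_TAG entry. */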
        memranges_each_entry(ptr, &test_memrange)
        {
                if (range_entry_tag(ptr) == HOLE_TAG) {
                        assert_int_equal(range_entry_base(ptr),
                                         ALIGN_UP(res_mock[CACHEABLE_TAG].base
                                                          + res_mock[CACHEABLE_TAG].size,
                                                  MEMRANGE_ALIGN));
                        assert_int_equal(range_entry_end(ptr), holes_fill_end);
                        /* Store pointer to the HOLE_TAG region for future use */
                        hole_ptr = ptr;
                }
                counter++;
        }
        assert_int_equal(counter, 2 + holes_found);

        /* If the test data does not have any holes in it, then terminate this test */
        if (holes_found == 0)
                return;

        assert_non_null(hole_ptr);
        counter = 0;

        /* Create a hole crossing the border of two range entries */
        const resource_t new_cacheable_end = ALIGN_DOWN(
                res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size - 4 * KiB,
                MEMRANGE_ALIGN);
        const resource_t new_hole_begin =
                ALIGN_UP(range_entry_base(hole_ptr) + 4 * KiB, MEMRANGE_ALIGN);
        const resource_t ranges_diff = new_hole_begin - new_cacheable_end;
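
        /* The new hole spans the last 4 KiB of the CACHEABLE entry and the first 4 KiB of the
           HOLE entry, so it crosses the boundary shared by the two. */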
        memranges_create_hole(&test_memrange, new_cacheable_end, ranges_diff);

        memranges_each_entry(ptr, &test_memrange)
        {
                switch (range_entry_tag(ptr)) {
                case CACHEABLE_TAG:
                        assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);
                        assert_int_equal(range_entry_end(ptr), new_cacheable_end);
                        break;
                case RESERVED_TAG:
                        assert_int_equal(range_entry_base(ptr), res_mock[RESERVED_TAG].base);
                        assert_int_equal(range_entry_end(ptr),
                                         res_mock[RESERVED_TAG].base
                                                 + res_mock[RESERVED_TAG].size);
                        break;
                case HOLE_TAG:
                        assert_int_equal(range_entry_base(ptr), new_hole_begin);
                        assert_int_equal(range_entry_end(ptr), res_mock[RESERVED_TAG].base);
                        break;
                default:
                        break;
                }
                counter++;
        }
        assert_int_equal(counter, 3);

        memranges_teardown(&test_memrange);
}

/*
 * This test verifies the memranges_steal() function. A simple check is done by attempting
 * to steal some memory from the top of the region with CACHEABLE_TAG and some from
 * the bottom of the region with READONLY_TAG.
 *
 * Example memory ranges (res_mock1) for test_memrange_steal.
 * Space marked with (/) is stolen during the test.
 *
 *   +--------CACHEABLE_TAG--------+ <-0xE000
 *   |                             |
 *   |                             |
 *   |/////////////////////////////| <-stolen_base
 *   +-----------------------------+ <-0x100000 <-stolen_base + 0x4000
 *
 *
 *   +--------READONLY_TAG---------+ <-0xFF0000 <-stolen_base
 *   |/////////////////////////////| <-stolen_base + 0x4000
 *   |                             |
 *   |                             |
 *   +-----------------------------+ <-0x1000000
 *
 *
 *   +--------RESERVED_TAG---------+ <-0x100000000
 *   |                             |
 *   +-----------------------------+ <-0x100001000
 */
static void test_memrange_steal(void **state)
{
        bool status = false;
        resource_t stolen;
        const unsigned long cacheable = IORESOURCE_CACHEABLE;
        const unsigned long reserved = IORESOURCE_RESERVE;
        const unsigned long readonly = IORESOURCE_READONLY;
        const resource_t stolen_range_size = 0x4000;
        struct memranges test_memrange;
        struct resource *res_mock = *state;
        struct range_entry *ptr;
        size_t count = 0;

        memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
        memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

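        /* Steal stolen_range_size bytes with 4 KiB (2^12) alignment, limited to addresses
           below the top of the RESERVED region: first from the top of the CACHEABLE range,
           then from the bottom of the READONLY range (the last argument selects top/bottom). */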
        status = memranges_steal(&test_memrange,
                                 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
                                 stolen_range_size, 12, CACHEABLE_TAG, &stolen, true);
        assert_true(status);
        assert_in_range(stolen, res_mock[CACHEABLE_TAG].base,
                        res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size);
        status = memranges_steal(&test_memrange,
                                 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
                                 stolen_range_size, 12, READONLY_TAG, &stolen, false);
        assert_true(status);
        assert_in_range(stolen, res_mock[READONLY_TAG].base,
                        res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);

        memranges_each_entry(ptr, &test_memrange)
        {
                if (range_entry_tag(ptr) == CACHEABLE_TAG) {
                        assert_int_equal(range_entry_end(ptr),
                                         ALIGN_DOWN(ALIGN_UP(res_mock[CACHEABLE_TAG].base
                                                                     + res_mock[CACHEABLE_TAG].size,
                                                             MEMRANGE_ALIGN)
                                                            - stolen_range_size,
                                                    MEMRANGE_ALIGN));
                }
                if (range_entry_tag(ptr) == READONLY_TAG) {
                        assert_int_equal(range_entry_base(ptr),
                                         ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
                                                 + stolen_range_size);
                }
                count++;
        }
        assert_int_equal(count, 3);
        count = 0;

        /* Check if inserting ranges in previously stolen areas will merge them. */
        memranges_insert(&test_memrange,
                         res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size
                                 - stolen_range_size - 0x12,
                         stolen_range_size, CACHEABLE_TAG);
        memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC, stolen_range_size,
                         READONLY_TAG);
        memranges_each_entry(ptr, &test_memrange)
        {
                const unsigned long tag = range_entry_tag(ptr);
                assert_true(tag == CACHEABLE_TAG || tag == READONLY_TAG || tag == RESERVED_TAG);
                assert_int_equal(
                        range_entry_base(ptr),
                        ALIGN_DOWN(res_mock[tag].base, MEMRANGE_ALIGN));
                assert_int_equal(
                        range_entry_end(ptr),
                        ALIGN_UP(res_mock[tag].base + res_mock[tag].size, MEMRANGE_ALIGN));
                count++;
        }
        assert_int_equal(count, 3);
        count = 0;

        memranges_teardown(&test_memrange);
}

/* Utility function checking the number of entries and the alignment of their base and end */
static void check_range_entries_count_and_alignment(struct memranges *ranges,
                                                    size_t ranges_count, resource_t alignment)
{
        size_t count = 0;
        struct range_entry *ptr;

        memranges_each_entry(ptr, ranges)
        {
                assert_true(IS_ALIGNED(range_entry_base(ptr), alignment));
                assert_true(IS_ALIGNED(range_entry_end(ptr), alignment));

                count++;
        }
        assert_int_equal(ranges_count, count);
}

/* This test verifies the memranges_init*() and memranges_teardown() functions.
   Added ranges are checked for correct count and alignment. */
static void test_memrange_init_and_teardown(void **state)
{
        const unsigned long cacheable = IORESOURCE_CACHEABLE;
        const unsigned long reserved = IORESOURCE_RESERVE;
        const unsigned long readonly = IORESOURCE_READONLY;
        struct memranges test_memrange;
        struct range_entry range_entries[4] = {0};
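        /* Backing storage passed to the memranges_init_empty*() variants further below. */
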
        /* Test memranges_init() correctness */
        memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
        memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

        /* Expect all entries to be aligned to 4KiB (2^12) */
        check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

        /* Expect the ranges list to be empty after teardown */
        memranges_teardown(&test_memrange);
        assert_true(memranges_is_empty(&test_memrange));

        /* Test memranges_init_with_alignment() correctness with alignment of 1KiB (2^10) */
        memranges_init_with_alignment(&test_memrange, cacheable, cacheable, CACHEABLE_TAG, 10);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
        memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

        check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(10));

        memranges_teardown(&test_memrange);
        assert_true(memranges_is_empty(&test_memrange));

        /* Test memranges_init_empty() correctness */
        memranges_init_empty(&test_memrange, &range_entries[0], ARRAY_SIZE(range_entries));
        assert_true(memranges_is_empty(&test_memrange));

        memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
        memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

        check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

        memranges_teardown(&test_memrange);
        assert_true(memranges_is_empty(&test_memrange));

        /* Test memranges_init_empty_with_alignment() correctness with alignment of 8KiB (2^13) */
        memranges_init_empty_with_alignment(&test_memrange, &range_entries[0],
                                            ARRAY_SIZE(range_entries), 13);
        assert_true(memranges_is_empty(&test_memrange));

        memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
        memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
        memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

        check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(13));

        memranges_teardown(&test_memrange);
        assert_true(memranges_is_empty(&test_memrange));
}

/* Filter function accepting ranges having the memory resource flag */
static int memrange_filter_mem_only(struct device *dev, struct resource *res)
{
        /* Accept only memory resources */
        return res->flags & IORESOURCE_MEM;
}

/* Filter function rejecting ranges having the memory resource flag */
static int memrange_filter_non_mem(struct device *dev, struct resource *res)
{
        /* Reject memory resources */
        return !(res->flags & IORESOURCE_MEM);
}

/* This test verifies the memranges_add_resources_filter() function by providing filter
   functions which accept or reject ranges. */
static void test_memrange_add_resources_filter(void **state)
{
        const unsigned long cacheable = IORESOURCE_CACHEABLE;
        const unsigned long reserved = IORESOURCE_RESERVE;
        struct memranges test_memrange;
        struct range_entry *ptr;
        size_t count = 0;
        size_t accepted_tags[] = {CACHEABLE_TAG, RESERVED_TAG};

        /* Check if the filter accepts a range correctly */
        memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
        memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
                                       memrange_filter_mem_only);

        /* Check if the filter accepted the desired range. */
        memranges_each_entry(ptr, &test_memrange)
        {
                assert_in_set(range_entry_tag(ptr), accepted_tags, ARRAY_SIZE(accepted_tags));
                assert_true(IS_ALIGNED(range_entry_base(ptr), MEMRANGE_ALIGN));
                assert_true(IS_ALIGNED(range_entry_end(ptr), MEMRANGE_ALIGN));
                count++;
        }
        assert_int_equal(2, count);
        count = 0;
        memranges_teardown(&test_memrange);

        /* Check if the filter rejects a range correctly */
        memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
        memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
                                       memrange_filter_non_mem);

        check_range_entries_count_and_alignment(&test_memrange, 1, MEMRANGE_ALIGN);

        memranges_teardown(&test_memrange);
}

int main(void)
{
        const struct CMUnitTest tests[] = {
                cmocka_unit_test(test_memrange_basic),
                cmocka_unit_test(test_memrange_clone_insert),
                cmocka_unit_test(test_memrange_holes),
                cmocka_unit_test(test_memrange_steal),
                cmocka_unit_test(test_memrange_init_and_teardown),
                cmocka_unit_test(test_memrange_add_resources_filter),
        };

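        /* Each group run returns its number of failed tests, so any failure makes main()
           return a nonzero value. */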
        return cmocka_run_group_tests_name(__TEST_NAME__ "(Boundary on 4GiB)", tests,
                                           setup_test_1, NULL)
               + cmocka_run_group_tests_name(__TEST_NAME__ "(Boundaries 1 byte from 4GiB)",
                                             tests, setup_test_2, NULL)
               + cmocka_run_group_tests_name(__TEST_NAME__ "(Range over 4GiB boundary)", tests,
                                             setup_test_3, NULL);
}