1 /* SPDX-License-Identifier: GPL-2.0-only */
#include <tests/test.h>

#include <commonlib/helpers.h>
#include <device/device.h>
#include <device/resource.h>
#include <memrange.h>
10 #define MEMRANGE_ALIGN (POWER_OF_2(12))
/* Tags for the mock resources below. Avoid using 0 to verify that UUT really sets this
 * memory, but keep value small, as this will be an index in the table.
 * NOTE(review): the enum body was lost in the mangled source. The relative order below is
 * fixed by the tests (tables are indexed with these tags in this order, and the tests
 * assert tag ranges [CACHEABLE_TAG, RESERVED_TAG] and [CACHEABLE_TAG, END_OF_RESOURCES - 1]),
 * but the exact starting value is an assumption — confirm against version control. */
enum mem_types {
	CACHEABLE_TAG = 10,
	RESERVED_TAG,
	READONLY_TAG,
	INSERTED_TAG,
	HOLE_TAG,
	END_OF_RESOURCES
};
23 /* Indices of entries matters, since it must reflect mem_types enum */
24 struct resource res_mock_1
[] = {
25 [CACHEABLE_TAG
] = {.base
= 0xE000,
27 .next
= &res_mock_1
[RESERVED_TAG
],
28 .flags
= IORESOURCE_CACHEABLE
| IORESOURCE_MEM
},
29 [RESERVED_TAG
] = {.base
= 4ULL * GiB
,
31 .next
= &res_mock_1
[READONLY_TAG
],
32 .flags
= IORESOURCE_RESERVE
| IORESOURCE_MEM
},
33 [READONLY_TAG
] = {.base
= 0xFF0000,
36 .flags
= IORESOURCE_READONLY
| IORESOURCE_MEM
}
/* NOTE(review): this fixture is mangled — logical lines are split across several physical
 * lines and some are missing entirely: the ".size" initializer of RESERVED_TAG, the
 * ".size"/".next" initializers of READONLY_TAG, the list-terminating ".next = NULL" and
 * the closing "};". The missing size values are test data that cannot be reconstructed
 * from the visible text — restore them from version control before building. */
39 /* Boundary 1 byte below 4GiB and 1 byte above 4GiB. */
40 struct resource res_mock_2
[] = {
/* CACHEABLE_TAG: base 0x1000000, sized so the region ends exactly 1 byte below 4GiB. */
41 [CACHEABLE_TAG
] = {.base
= 0x1000000,
42 .size
= 4ULL * GiB
- 0x1000001ULL
,
43 .next
= &res_mock_2
[RESERVED_TAG
],
44 .flags
= IORESOURCE_CACHEABLE
| IORESOURCE_MEM
},
/* RESERVED_TAG: starts exactly 1 byte above the 4GiB boundary; size not visible here. */
45 [RESERVED_TAG
] = {.base
= 4ULL * GiB
+ 1ULL,
47 .next
= &res_mock_2
[READONLY_TAG
],
48 .flags
= IORESOURCE_RESERVE
| IORESOURCE_MEM
},
/* READONLY_TAG: starts at address 0; size and next pointer not visible here. */
49 [READONLY_TAG
] = {.base
= 0,
52 .flags
= IORESOURCE_READONLY
| IORESOURCE_MEM
}
/* NOTE(review): this fixture is mangled — logical lines are split across several physical
 * lines and some are missing entirely: the ".size" initializers of all three entries, the
 * list-terminating ".next = NULL" and the closing "};". Per the comment, RESERVED_TAG must
 * be sized to cross the 4GiB boundary (base 1GiB, so size > 3GiB), but the exact values
 * are test data — restore them from version control before building. */
55 /* Boundary crossing 4GiB. */
56 struct resource res_mock_3
[] = {
/* CACHEABLE_TAG: base 0xD000; size not visible here. */
57 [CACHEABLE_TAG
] = {.base
= 0xD000,
59 .next
= &res_mock_3
[RESERVED_TAG
],
60 .flags
= IORESOURCE_CACHEABLE
| IORESOURCE_MEM
},
/* RESERVED_TAG: base 1GiB; size (crossing 4GiB) not visible here. */
61 [RESERVED_TAG
] = {.base
= 1ULL * GiB
,
63 .next
= &res_mock_3
[READONLY_TAG
],
64 .flags
= IORESOURCE_RESERVE
| IORESOURCE_MEM
},
/* READONLY_TAG: base 0xFF0000; size and next pointer not visible here. */
65 [READONLY_TAG
] = {.base
= 0xFF0000,
68 .flags
= IORESOURCE_READONLY
| IORESOURCE_MEM
}
72 struct device mock_device
= {.enabled
= 1};
74 /* Fake memory devices handle */
75 struct device
*all_devices
= &mock_device
;
77 int setup_test_1(void **state
)
80 mock_device
.resource_list
= &res_mock_1
[CACHEABLE_TAG
];
85 int setup_test_2(void **state
)
88 mock_device
.resource_list
= &res_mock_2
[CACHEABLE_TAG
];
93 int setup_test_3(void **state
)
96 mock_device
.resource_list
= &res_mock_3
[CACHEABLE_TAG
];
101 resource_t
get_aligned_base(struct resource
*res
, struct range_entry
*entry
)
103 return ALIGN_DOWN(res
[range_entry_tag(entry
)].base
, MEMRANGE_ALIGN
);
106 resource_t
get_aligned_end(struct resource
*res
, struct range_entry
*entry
)
108 resource_t end
= res
[range_entry_tag(entry
)].base
+ res
[range_entry_tag(entry
)].size
109 + (res
[range_entry_tag(entry
)].base
- range_entry_base(entry
));
110 return ALIGN_UP(end
, MEMRANGE_ALIGN
);
114 * This test verifies memranges_init(), memranges_add_resources() and memranges_teardown()
115 * functions. It covers basic functionality of memrange library - implementation of creating
116 * memrange structure from resources available on the platform and method for free'ing
119 * Example memory ranges (res_mock1) for test_memrange_basic.
120 * Ranges marked with asterisks (***) are not added to the test_memrange.
122 * +--------CACHEABLE_TAG--------+ <-0xE000
126 * +-----------------------------+ <-0x100000
130 * +-----***READONLY_TAG***------+ <-0xFF0000
134 * +-----------------------------+ <-0x1000000
137 * +--------RESERVED_TAG---------+ <-0x100000000
139 * +-----------------------------+ <-0x100001000
141 static void test_memrange_basic(void **state
)
144 const unsigned long cacheable
= IORESOURCE_CACHEABLE
;
145 const unsigned long reserved
= IORESOURCE_RESERVE
;
146 struct range_entry
*ptr
;
147 struct memranges test_memrange
;
148 struct resource
*res_mock
= *state
;
149 resource_t prev_base
= 0;
151 memranges_init(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
);
152 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
154 /* There should be two entries, since cacheable and
155 reserved regions are not neighbors */
156 memranges_each_entry(ptr
, &test_memrange
)
158 assert_in_range(range_entry_tag(ptr
), CACHEABLE_TAG
, RESERVED_TAG
);
159 assert_int_equal(range_entry_base(ptr
), get_aligned_base(res_mock
, ptr
));
161 assert_int_equal(range_entry_end(ptr
), get_aligned_end(res_mock
, ptr
));
163 /* Ranges have to be returned in increasing order */
164 assert_true(prev_base
<= range_entry_base(ptr
));
166 prev_base
= range_entry_base(ptr
);
169 assert_int_equal(counter
, 2);
172 /* Remove initial memrange */
173 memranges_teardown(&test_memrange
);
174 memranges_each_entry(ptr
, &test_memrange
) counter
++;
175 assert_int_equal(counter
, 0);
179 * This test verifies memranges_clone(), memranges_insert() and memranges_update_tag()
180 * functions. All operations are performed on cloned memrange. One of the most important thing
181 * to check, is that memrange_insert() should remove all ranges which are covered by the newly
184 * Example memory ranges (res_mock1) for test_memrange_clone_insert.
185 * Ranges marked with asterisks (***) are not added to the clone_memrange.
186 * Ranges marked with (^) have tag value changed during test.
188 * +--------CACHEABLE_TAG--------+ <-0xE000
189 * +------|----INSERTED_TAG----------+ | <-0xF000
190 * | | (^READONLY_TAG^) | |
192 * | +-----------------------------+ <-0x100000
193 * +---------------------------------+ <-0x101000
196 * +-----***READONLY_TAG***------+ <-0xFF0000
200 * +-----------------------------+ <-0x1000000
203 * +------+---------RESERVED_TAG-----+--+ <-0x100000000
205 * | +-----------------------------+ <-0x100001000
206 * +-----------INSERTED_TAG----------+ <-0x100002000
208 static void test_memrange_clone_insert(void **state
)
211 const unsigned long cacheable
= IORESOURCE_CACHEABLE
;
212 const unsigned long reserved
= IORESOURCE_RESERVE
;
213 struct range_entry
*ptr
;
214 struct memranges test_memrange
, clone_memrange
;
215 struct resource
*res_mock
= *state
;
216 const resource_t new_range_begin_offset
= 1ULL << 12;
218 memranges_init(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
);
219 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
221 memranges_clone(&clone_memrange
, &test_memrange
);
222 memranges_teardown(&test_memrange
);
224 /* Verify that new one is really a clone */
225 memranges_each_entry(ptr
, &clone_memrange
)
227 assert_in_range(range_entry_tag(ptr
), CACHEABLE_TAG
, END_OF_RESOURCES
- 1);
228 assert_int_equal(range_entry_base(ptr
), get_aligned_base(res_mock
, ptr
));
230 assert_int_equal(range_entry_end(ptr
), get_aligned_end(res_mock
, ptr
));
234 assert_int_equal(counter
, 2);
237 /* Insert new range, which will overlap with first region. */
238 memranges_insert(&clone_memrange
, res_mock
[CACHEABLE_TAG
].base
+ new_range_begin_offset
,
239 res_mock
[CACHEABLE_TAG
].size
, INSERTED_TAG
);
241 /* Three ranges should be there - CACHEABLE(shrunk), INSERTED and RESERVED */
242 memranges_each_entry(ptr
, &clone_memrange
)
244 resource_t expected_end
;
246 if (range_entry_tag(ptr
) == CACHEABLE_TAG
) {
247 assert_int_equal(range_entry_base(ptr
), res_mock
[CACHEABLE_TAG
].base
);
249 expected_end
= res_mock
[CACHEABLE_TAG
].base
+ new_range_begin_offset
;
250 assert_int_equal(range_entry_end(ptr
), expected_end
);
252 if (range_entry_tag(ptr
) == INSERTED_TAG
) {
253 assert_int_equal(range_entry_base(ptr
),
254 res_mock
[CACHEABLE_TAG
].base
+ new_range_begin_offset
);
256 expected_end
= res_mock
[CACHEABLE_TAG
].base
+ new_range_begin_offset
257 + res_mock
[CACHEABLE_TAG
].size
;
258 assert_int_equal(range_entry_end(ptr
),
259 ALIGN_UP(expected_end
, MEMRANGE_ALIGN
));
263 assert_int_equal(counter
, 3);
266 /* Insert new region, which will shadow readonly range.
267 * Additionally verify API for updating tags */
268 memranges_update_tag(&clone_memrange
, INSERTED_TAG
, READONLY_TAG
);
270 memranges_each_entry(ptr
, &clone_memrange
)
272 resource_t expected_end
;
274 assert_int_not_equal(range_entry_tag(ptr
), INSERTED_TAG
);
275 if (range_entry_tag(ptr
) == READONLY_TAG
) {
276 assert_int_equal(range_entry_base(ptr
),
277 res_mock
[CACHEABLE_TAG
].base
+ new_range_begin_offset
);
279 expected_end
= res_mock
[CACHEABLE_TAG
].base
+ new_range_begin_offset
280 + res_mock
[CACHEABLE_TAG
].size
;
281 assert_int_equal(range_entry_end(ptr
),
282 ALIGN_UP(expected_end
, MEMRANGE_ALIGN
));
286 /* Check if alignment (4KiB) is properly applied, that is begin - DOWN and end - UP */
287 memranges_insert(&clone_memrange
, res_mock
[RESERVED_TAG
].base
+ 0xAD,
288 res_mock
[RESERVED_TAG
].size
, INSERTED_TAG
);
290 memranges_each_entry(ptr
, &clone_memrange
)
292 resource_t expected_end
;
294 assert_int_not_equal(range_entry_tag(ptr
), RESERVED_TAG
);
295 if (range_entry_tag(ptr
) == INSERTED_TAG
) {
297 range_entry_base(ptr
),
298 ALIGN_DOWN(res_mock
[RESERVED_TAG
].base
, MEMRANGE_ALIGN
));
300 expected_end
= ALIGN_DOWN(res_mock
[RESERVED_TAG
].base
, MEMRANGE_ALIGN
)
301 + new_range_begin_offset
+ res_mock
[RESERVED_TAG
].size
;
302 expected_end
= ALIGN_UP(expected_end
, MEMRANGE_ALIGN
);
304 assert_int_equal(range_entry_end(ptr
), expected_end
);
308 assert_int_equal(counter
, 3);
311 memranges_teardown(&clone_memrange
);
315 * This test verifies memranges_fill_holes_up_to() and memranges_create_hole(). Idea of the test
316 * is to fill all holes, so that we end up with contiguous address space fully covered by
317 * entries. Then, holes are created on the border of two different regions
319 * Example memory ranges (res_mock1) for test_memrange_holes.
320 * Space marked with (/) is not covered by any region at the end of the test.
322 * +--------CACHEABLE_TAG--------+ <-0xE000
325 * //|/////////////////////////////| <-0xFF000
326 * //+-----------HOLE_TAG----------+ <-0x100000
327 * //|/////////////////////////////| <-0x101000
332 * +--------RESERVED_TAG---------+ <-0x100000000
334 * +-----------------------------+ <-0x100001000
336 static void test_memrange_holes(void **state
)
339 const unsigned long cacheable
= IORESOURCE_CACHEABLE
;
340 const unsigned long reserved
= IORESOURCE_RESERVE
;
341 struct range_entry
*ptr
;
342 struct range_entry
*hole_ptr
= NULL
;
343 struct memranges test_memrange
;
344 struct resource
*res_mock
= *state
;
346 resource_t last_range_end
= 0;
347 const resource_t holes_fill_end
= res_mock
[RESERVED_TAG
].base
;
349 memranges_init(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
);
350 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
352 /* Count holes in ranges */
353 memranges_each_entry(ptr
, &test_memrange
)
355 if (!last_range_end
) {
356 last_range_end
= range_entry_end(ptr
);
361 if (range_entry_base(ptr
) != last_range_end
) {
363 last_range_end
= range_entry_end(ptr
);
366 if (range_entry_base(ptr
) >= holes_fill_end
)
370 /* Create range entries which covers continuous memory range
371 (but with different tags) */
372 memranges_fill_holes_up_to(&test_memrange
, holes_fill_end
, HOLE_TAG
);
374 memranges_each_entry(ptr
, &test_memrange
)
376 if (range_entry_tag(ptr
) == HOLE_TAG
) {
377 assert_int_equal(range_entry_base(ptr
),
378 ALIGN_UP(res_mock
[CACHEABLE_TAG
].base
379 + res_mock
[CACHEABLE_TAG
].size
,
381 assert_int_equal(range_entry_end(ptr
), holes_fill_end
);
382 /* Store pointer to HOLE_TAG region for future use */
387 assert_int_equal(counter
, 2 + holes_found
);
389 /* If test data does not have any holes in it then terminate this test */
390 if (holes_found
== 0)
393 assert_non_null(hole_ptr
);
396 /* Create hole crossing the border of two range entries */
397 const resource_t new_cacheable_end
= ALIGN_DOWN(
398 res_mock
[CACHEABLE_TAG
].base
+ res_mock
[CACHEABLE_TAG
].size
- 4 * KiB
,
400 const resource_t new_hole_begin
=
401 ALIGN_UP(range_entry_base(hole_ptr
) + 4 * KiB
, MEMRANGE_ALIGN
);
402 const resource_t ranges_diff
= new_hole_begin
- new_cacheable_end
;
404 memranges_create_hole(&test_memrange
, new_cacheable_end
, ranges_diff
);
406 memranges_each_entry(ptr
, &test_memrange
)
408 switch (range_entry_tag(ptr
)) {
410 assert_int_equal(range_entry_base(ptr
), res_mock
[CACHEABLE_TAG
].base
);
411 assert_int_equal(range_entry_end(ptr
), new_cacheable_end
);
414 assert_int_equal(range_entry_base(ptr
), res_mock
[RESERVED_TAG
].base
);
415 assert_int_equal(range_entry_end(ptr
),
416 res_mock
[RESERVED_TAG
].base
417 + res_mock
[RESERVED_TAG
].size
);
420 assert_int_equal(range_entry_base(ptr
), new_hole_begin
);
421 assert_int_equal(range_entry_end(ptr
), res_mock
[RESERVED_TAG
].base
);
428 assert_int_equal(counter
, 3);
430 memranges_teardown(&test_memrange
);
434 * This test verifies memranges_steal() function. Simple check is done by attempt so steal some
435 * memory from region with READONLY_TAG.
437 * Example memory ranges (res_mock1) for test_memrange_steal.
438 * Space marked with (/) is not covered by any region at the end of the test.
440 * +--------CACHEABLE_TAG--------+ <-0xE000
444 * +-----------------------------+ <-0x100000
448 * +--------READONLY_TAG---------+ <-0xFF0000
450 * |/////////////////////////////| <-stolen_base
451 * |/////////////////////////////| <-stolen_base + 0x4000
452 * +-----------------------------+ <-0x1000000
455 * +--------RESERVED_TAG---------+ <-0x100000000
457 * +-----------------------------+ <-0x100001000
459 static void test_memrange_steal(void **state
)
463 const unsigned long cacheable
= IORESOURCE_CACHEABLE
;
464 const unsigned long reserved
= IORESOURCE_RESERVE
;
465 const unsigned long readonly
= IORESOURCE_READONLY
;
466 const resource_t stolen_range_size
= 0x4000;
467 struct memranges test_memrange
;
468 struct resource
*res_mock
= *state
;
469 struct range_entry
*ptr
;
472 memranges_init(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
);
473 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
474 memranges_add_resources(&test_memrange
, readonly
, readonly
, READONLY_TAG
);
476 status
= memranges_steal(&test_memrange
,
477 res_mock
[RESERVED_TAG
].base
+ res_mock
[RESERVED_TAG
].size
,
478 stolen_range_size
, 12, READONLY_TAG
, &stolen
);
480 assert_in_range(stolen
, res_mock
[READONLY_TAG
].base
,
481 res_mock
[READONLY_TAG
].base
+ res_mock
[READONLY_TAG
].size
);
483 memranges_each_entry(ptr
, &test_memrange
)
485 if (range_entry_tag(ptr
) == READONLY_TAG
) {
486 assert_int_equal(range_entry_base(ptr
),
487 ALIGN_DOWN(res_mock
[READONLY_TAG
].base
, MEMRANGE_ALIGN
)
488 + stolen_range_size
);
492 assert_int_equal(count
, 3);
495 /* Check if inserting range in previously stolen area will merge it. */
496 memranges_insert(&test_memrange
, res_mock
[READONLY_TAG
].base
+ 0xCC, stolen_range_size
,
498 memranges_each_entry(ptr
, &test_memrange
)
500 if (range_entry_tag(ptr
) == READONLY_TAG
) {
502 range_entry_base(ptr
),
503 ALIGN_DOWN(res_mock
[READONLY_TAG
].base
, MEMRANGE_ALIGN
));
505 range_entry_end(ptr
),
506 ALIGN_UP(range_entry_base(ptr
) + res_mock
[READONLY_TAG
].size
,
511 assert_int_equal(count
, 3);
514 memranges_teardown(&test_memrange
);
517 /* Utility function checking number of entries and alignment of their base and end pointers */
518 static void check_range_entries_count_and_alignment(struct memranges
*ranges
,
519 size_t ranges_count
, resource_t alignment
)
522 struct range_entry
*ptr
;
524 memranges_each_entry(ptr
, ranges
)
526 assert_true(IS_ALIGNED(range_entry_base(ptr
), alignment
));
527 assert_true(IS_ALIGNED(range_entry_end(ptr
), alignment
));
531 assert_int_equal(ranges_count
, count
);
534 /* This test verifies memranges_init*() and memranges_teardown() functions.
535 Added ranges are checked correct count and alignment. */
536 static void test_memrange_init_and_teardown(void **state
)
538 const unsigned long cacheable
= IORESOURCE_CACHEABLE
;
539 const unsigned long reserved
= IORESOURCE_RESERVE
;
540 const unsigned long readonly
= IORESOURCE_READONLY
;
541 struct memranges test_memrange
;
542 struct range_entry range_entries
[4] = {0};
544 /* Test memranges_init() correctness */
545 memranges_init(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
);
546 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
547 memranges_add_resources(&test_memrange
, readonly
, readonly
, READONLY_TAG
);
549 /* Expect all entries to be aligned to 4KiB (2^12) */
550 check_range_entries_count_and_alignment(&test_memrange
, 3, MEMRANGE_ALIGN
);
552 /* Expect ranges list to be empty after teardown */
553 memranges_teardown(&test_memrange
);
554 assert_true(memranges_is_empty(&test_memrange
));
557 /* Test memranges_init_with_alignment() correctness with alignment of 1KiB (2^10) */
558 memranges_init_with_alignment(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
, 10);
559 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
560 memranges_add_resources(&test_memrange
, readonly
, readonly
, READONLY_TAG
);
562 check_range_entries_count_and_alignment(&test_memrange
, 3, POWER_OF_2(10));
564 memranges_teardown(&test_memrange
);
565 assert_true(memranges_is_empty(&test_memrange
));
568 /* Test memranges_init_empty() correctness */
569 memranges_init_empty(&test_memrange
, &range_entries
[0], ARRAY_SIZE(range_entries
));
570 assert_true(memranges_is_empty(&test_memrange
));
572 memranges_add_resources(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
);
573 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
574 memranges_add_resources(&test_memrange
, readonly
, readonly
, READONLY_TAG
);
576 check_range_entries_count_and_alignment(&test_memrange
, 3, MEMRANGE_ALIGN
);
578 memranges_teardown(&test_memrange
);
579 assert_true(memranges_is_empty(&test_memrange
));
582 /* Test memranges_init_with_alignment() correctness with alignment of 8KiB (2^13) */
583 memranges_init_empty_with_alignment(&test_memrange
, &range_entries
[0],
584 ARRAY_SIZE(range_entries
), 13);
585 assert_true(memranges_is_empty(&test_memrange
));
587 memranges_add_resources(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
);
588 memranges_add_resources(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
589 memranges_add_resources(&test_memrange
, readonly
, readonly
, READONLY_TAG
);
591 check_range_entries_count_and_alignment(&test_memrange
, 3, POWER_OF_2(13));
593 memranges_teardown(&test_memrange
);
594 assert_true(memranges_is_empty(&test_memrange
));
597 /* Filter function accepting ranges having memory resource flag */
598 static int memrange_filter_mem_only(struct device
*dev
, struct resource
*res
)
600 /* Accept only memory resources */
601 return res
->flags
& IORESOURCE_MEM
;
604 /* Filter function rejecting ranges having memory resource flag */
605 static int memrange_filter_non_mem(struct device
*dev
, struct resource
*res
)
607 /* Accept only memory resources */
608 return !(res
->flags
& IORESOURCE_MEM
);
611 /* This test verifies memranges_add_resources_filter() function by providing filter functions
612 which accept or reject ranges. */
613 static void test_memrange_add_resources_filter(void **state
)
615 const unsigned long cacheable
= IORESOURCE_CACHEABLE
;
616 const unsigned long reserved
= IORESOURCE_RESERVE
;
617 struct memranges test_memrange
;
618 struct range_entry
*ptr
;
620 size_t accepted_tags
[] = {CACHEABLE_TAG
, RESERVED_TAG
};
622 /* Check if filter accepts range correctly */
623 memranges_init(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
624 memranges_add_resources_filter(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
,
625 memrange_filter_mem_only
);
627 /* Check if filter accepted desired range. */
628 memranges_each_entry(ptr
, &test_memrange
)
630 assert_in_set(range_entry_tag(ptr
), accepted_tags
, ARRAY_SIZE(accepted_tags
));
631 assert_true(IS_ALIGNED(range_entry_base(ptr
), MEMRANGE_ALIGN
));
632 assert_true(IS_ALIGNED(range_entry_end(ptr
), MEMRANGE_ALIGN
));
635 assert_int_equal(2, count
);
637 memranges_teardown(&test_memrange
);
639 /* Check if filter rejects range correctly */
640 memranges_init(&test_memrange
, reserved
, reserved
, RESERVED_TAG
);
641 memranges_add_resources_filter(&test_memrange
, cacheable
, cacheable
, CACHEABLE_TAG
,
642 memrange_filter_non_mem
);
644 check_range_entries_count_and_alignment(&test_memrange
, 1, MEMRANGE_ALIGN
);
646 memranges_teardown(&test_memrange
);
651 const struct CMUnitTest tests
[] = {
652 cmocka_unit_test(test_memrange_basic
),
653 cmocka_unit_test(test_memrange_clone_insert
),
654 cmocka_unit_test(test_memrange_holes
),
655 cmocka_unit_test(test_memrange_steal
),
656 cmocka_unit_test(test_memrange_init_and_teardown
),
657 cmocka_unit_test(test_memrange_add_resources_filter
),
660 return cmocka_run_group_tests_name(__TEST_NAME__
"(Boundary on 4GiB)", tests
,
662 + cmocka_run_group_tests_name(__TEST_NAME__
"(Boundaries 1 byte from 4GiB)",
663 tests
, setup_test_2
, NULL
)
664 + cmocka_run_group_tests_name(__TEST_NAME__
"(Range over 4GiB boundary)", tests
,