// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_nid_api.h"
static int alloc_nid_test_flags = TEST_F_NONE;

/*
 * Contains the fraction of MEM_SIZE contained in each node in basis point
 * units (one hundredth of 1% or 1/10000).
 */
static const unsigned int node_fractions[] = {
	2500, /* 1/4  */
	 625, /* 1/16 */
	1250, /* 1/8  */
	1250, /* 1/8  */
	 625, /* 1/16 */
	 625, /* 1/16 */
	2500, /* 1/4  */
	 625, /* 1/16 */
};
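/*
 * Illustrative only: setup_numa_memblock() (provided by the test framework)
 * presumably turns each basis-point fraction above into a node size. For
 * example, under a hypothetical MEM_SIZE of 16MB, a fraction of 2500 (1/4)
 * would yield a 4MB node:
 *
 *	node_size = ((phys_addr_t)MEM_SIZE * fraction) / 10000;
 */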
static inline const char * const get_memblock_alloc_nid_name(int flags)
{
	if (flags & TEST_F_EXACT)
		return "memblock_alloc_exact_nid_raw";
	if (flags & TEST_F_RAW)
		return "memblock_alloc_try_nid_raw";
	return "memblock_alloc_try_nid";
}
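/*
 * A minimal sketch of the flag combinations the suite is expected to run
 * with; it mirrors the assertion in run_memblock_alloc_nid() below that
 * TEST_F_EXACT is only ever set together with TEST_F_RAW:
 *
 *	alloc_nid_test_flags = TEST_F_NONE;                (memblock_alloc_try_nid)
 *	alloc_nid_test_flags = TEST_F_RAW;                 (memblock_alloc_try_nid_raw)
 *	alloc_nid_test_flags = TEST_F_EXACT | TEST_F_RAW;  (memblock_alloc_exact_nid_raw)
 */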
static inline void *run_memblock_alloc_nid(phys_addr_t size,
					   phys_addr_t align,
					   phys_addr_t min_addr,
					   phys_addr_t max_addr, int nid)
{
	assert(!(alloc_nid_test_flags & TEST_F_EXACT) ||
	       (alloc_nid_test_flags & TEST_F_RAW));
	/*
	 * TEST_F_EXACT should be checked before TEST_F_RAW since
	 * memblock_alloc_exact_nid_raw() performs raw allocations.
	 */
	if (alloc_nid_test_flags & TEST_F_EXACT)
		return memblock_alloc_exact_nid_raw(size, align, min_addr,
						    max_addr, nid);
	if (alloc_nid_test_flags & TEST_F_RAW)
		return memblock_alloc_try_nid_raw(size, align, min_addr,
						  max_addr, nid);
	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
}
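/*
 * Illustrative only: a typical call to the dispatcher above, in the shape
 * the range tests below use (NUMA_NO_NODE leaves node selection to the
 * allocator):
 *
 *	ptr = run_memblock_alloc_nid(SZ_128, SMP_CACHE_BYTES,
 *				     memblock_start_of_DRAM(),
 *				     memblock_end_of_DRAM(), NUMA_NO_NODE);
 */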
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range.
 *
 * Expect to allocate a region that ends at max_addr.
 */
static int alloc_nid_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the end address is misaligned.
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_top_down_end_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512 + misalign;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size - misalign);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A simple test that tries to allocate a memory region, which spans over the
 * min_addr and max_addr range.
 *
 * Expect to allocate a region that starts at min_addr and ends at
 * max_addr, given that min_addr is aligned.
 */
static int alloc_nid_exact_address_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range.
 *
 * Expect to drop the lower limit and allocate a memory region which
 * ends at max_addr (if the address is aligned).
 */
static int alloc_nid_top_down_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range, with the latter being too close to the
 * beginning of the available memory.
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_low_max_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, with min_addr being so close that it's next to an allocated region.
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_nid_min_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_128;
	phys_addr_t r2_size = SZ_64;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t reserved_base;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM();
	min_addr = max_addr - r2_size;
	reserved_base = min_addr - r1_size;

	memblock_reserve(reserved_base, r1_size);

	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, reserved_base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, with max_addr being so close that it's next to an allocated region.
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_nid_max_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM() - r1_size;
	min_addr = max_addr - r2_size;

	memblock_reserve(max_addr, r1_size);

	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap big enough
 * to fit a new region:
 *
 *  |    +--------+         +-------+------+  |
 *  |    |   r2   |         |  rgn  |  r1  |  |
 *  +----+--------+---------+-------+------+--+
 *
 * Expect to merge the new region with r1. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_top_down_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size + r3_size);
	ASSERT_EQ(rgn1->base, max_addr - r3_size);

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap of a size
 * equal to the size of the new region:
 *
 *  |     +--------+--------+--------+     |
 *  |     |   r2   |   r3   |   r1   |     |
 *  +-----+--------+--------+--------+-----+
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_nid_reserved_full_merge_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap that can't
 * fit a new region:
 *
 *  |  +----------+------+    +------+   |
 *  |  |    r3    |  r2  |    |  r1  |   |
 *  +--+----------+------+----+------+---+
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size counter gets updated.
 */
static int alloc_nid_top_down_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * but it's too narrow and everything else is reserved.
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_reserved_all_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SMP_CACHE_BYTES;

	r2.size = MEM_SIZE - (r1.size + gap_size);
	r2.base = memblock_start_of_DRAM();

	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that ends at the end of the available memory.
 */
static int alloc_nid_top_down_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_end_of_DRAM() - SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region that ends at the end of the available memory.
 */
static int alloc_nid_top_down_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() - SZ_256;
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range.
 *
 * Expect to allocate a region that ends before max_addr.
 */
static int alloc_nid_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the start address is misaligned.
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_bottom_up_start_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + misalign;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range.
 *
 * Expect to drop the lower limit and allocate a memory region which
 * starts at the beginning of the available memory.
 */
static int alloc_nid_bottom_up_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap big enough
 * to fit a new region:
 *
 *  |    +--------+-------+   +------+  |
 *  |    |   r2   |  rgn  |   |  r1  |  |
 *  +----+--------+-------+---+------+--+
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_bottom_up_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, max_addr);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap that can't
 * fit a new region:
 *
 *  |----------+    +------+   +----+  |
 *  |    r3    |    |  r2  |   | r1 |  |
 *  +----------+----+------+---+----+--+
 *
 * Expect to drop the lower limit and allocate memory at the beginning of the
 * available memory. The region counter and total size fields get updated.
 * Other regions are not modified.
 */
static int alloc_nid_bottom_up_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn3->size, r3_size);
	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(memblock.reserved.cnt, 3);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that starts at min_addr.
 */
static int alloc_nid_bottom_up_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region at the beginning of the available memory.
 */
static int alloc_nid_bottom_up_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM() - SZ_256;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
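/*
 * The generic checks above are driven twice via run_top_down() and
 * run_bottom_up() from the test framework (see the wrappers below). A
 * minimal sketch of what such a driver presumably does; the helper names
 * are real, but this body is an assumption, not the framework's exact
 * implementation:
 *
 *	static void run_top_down(int (*func)(void))
 *	{
 *		memblock_set_bottom_up(false);
 *		func();
 *	}
 */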
/* Test case wrappers for range tests */
static int alloc_nid_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_simple_check();

	return 0;
}
static int alloc_nid_misaligned_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_end_misaligned_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_start_misaligned_check();

	return 0;
}

static int alloc_nid_narrow_range_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_narrow_range_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_narrow_range_check();

	return 0;
}

static int alloc_nid_reserved_with_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_reserved_with_space_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_reserved_with_space_check();

	return 0;
}

static int alloc_nid_reserved_no_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_reserved_no_space_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_reserved_no_space_check();

	return 0;
}

static int alloc_nid_cap_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_cap_max_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_cap_max_check();

	return 0;
}

static int alloc_nid_cap_min_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_cap_min_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_cap_min_check();

	return 0;
}
static int alloc_nid_min_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_min_reserved_generic_check);
	run_bottom_up(alloc_nid_min_reserved_generic_check);

	return 0;
}

static int alloc_nid_max_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_max_reserved_generic_check);
	run_bottom_up(alloc_nid_max_reserved_generic_check);

	return 0;
}

static int alloc_nid_exact_address_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_exact_address_generic_check);
	run_bottom_up(alloc_nid_exact_address_generic_check);

	return 0;
}

static int alloc_nid_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_reserved_full_merge_generic_check);
	run_bottom_up(alloc_nid_reserved_full_merge_generic_check);

	return 0;
}

static int alloc_nid_reserved_all_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_reserved_all_generic_check);
	run_bottom_up(alloc_nid_reserved_all_generic_check);

	return 0;
}

static int alloc_nid_low_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_low_max_generic_check);
	run_bottom_up(alloc_nid_low_max_generic_check);

	return 0;
}
static int memblock_alloc_nid_range_checks(void)
{
	test_print("Running %s range tests...\n",
		   get_memblock_alloc_nid_name(alloc_nid_test_flags));

	alloc_nid_simple_check();
	alloc_nid_misaligned_check();
	alloc_nid_narrow_range_check();
	alloc_nid_reserved_with_space_check();
	alloc_nid_reserved_no_space_check();
	alloc_nid_cap_max_check();
	alloc_nid_cap_min_check();

	alloc_nid_min_reserved_check();
	alloc_nid_max_reserved_check();
	alloc_nid_exact_address_check();
	alloc_nid_reserved_full_merge_check();
	alloc_nid_reserved_all_check();
	alloc_nid_low_max_check();

	return 0;
}
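/*
 * Illustrative only: a sketch of how a top-level entry point might drive the
 * runner above once per flag combination; the actual entry point declared in
 * alloc_nid_api.h may differ:
 *
 *	int memblock_alloc_nid_checks(void)
 *	{
 *		alloc_nid_test_flags = TEST_F_NONE;
 *		memblock_alloc_nid_range_checks();
 *
 *		alloc_nid_test_flags = TEST_F_RAW;
 *		memblock_alloc_nid_range_checks();
 *
 *		return 0;
 *	}
 */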
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the end of the requested node.
 */
static int alloc_nid_top_down_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 *  |   +-----+          +------------------+     |
 *  |   | req |          |     expected     |     |
 *  +---+-----+----------+------------------+-----+
 *
 *  |                             +---------+     |
 *  |                             |   rgn   |     |
 *  +-----------------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that has
 * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_small_node_check(void)
{
	int nid_req = 1;
	int nid_exp = 6;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_2 * req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 *  |              +---------+            +------------------+     |
 *  |              |requested|            |     expected     |     |
 *  +--------------+---------+------------+------------------+-----+
 *
 *  |              +---------+                      +---------+    |
 *  |              | reserved|                      |   new   |    |
 *  +--------------+---------+----------------------+---------+----+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case, nid = 6) after
 * falling back to NUMA_NO_NODE. The region count and total size get updated.
 */
static int alloc_nid_top_down_numa_node_reserved_check(void)
{
	int nid_req = 2;
	int nid_exp = 6;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(req_node->base, req_node->size);
	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+          |
 *  |           |               requested               |          |
 *  +-----------+---------------------------------------+----------+
 *
 *  |           +------------------+              +-----+          |
 *  |           |     reserved     |              | new |          |
 *  +-----------+------------------+--------------+-----+----------+
 *
 * Expect to allocate an aligned region at the end of the requested node. The
 * region count and total size get updated.
 */
static int alloc_nid_top_down_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 *  |           +-----------------------+         +----------------------|
 *  |           |       requested       |         |       expected       |
 *  +-----------+-----------------------+---------+----------------------+
 *
 *  |                 +----------+                           +-----------|
 *  |                 | reserved |                           |    new    |
 *  +-----------------+----------+---------------------------+-----------+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case,
 * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
 * and total size get updated.
 */
static int alloc_nid_top_down_numa_part_reserved_fallback_check(void)
{
	int nid_req = 4;
	int nid_exp = NUMA_NODES - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);
	r1.size = size;

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_nid_top_down_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node_end - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *  |      +--------------------------+---------+                |
 *  |      |         expected         |requested|                |
 *  +------+--------------------------+---------+----------------+
 *
 * Expect to drop the lower limit and allocate a memory region that
 * ends at the end of the first node that overlaps with the range.
 */
static int alloc_nid_top_down_numa_split_range_high_check(void)
{
	int nid_req = 3;
	int nid_exp = nid_req - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t exp_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	exp_node_end = region_end(exp_node);
	min_addr = exp_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node_end - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the
 * requested node ends before min_addr:
 *
 *  |    +---------------+        +-------------+---------+    |
 *  |    |   requested   |        |    node1    |  node2  |    |
 *  +----+---------------+--------+-------------+---------+----+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_nid_top_down_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when the requested node and the range do not overlap, and the requested
 * node ends before min_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when the requested node and the range do not overlap, and the requested
 * node starts after max_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *  |     +----------+----...----+----------+        +-----------+  |
 *  |     | min node |    ...    | max node |        | requested |  |
 *  +-----+----------+----...----+----------+--------+-----------+--+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_no_overlap_high_check(void)
{
	int nid_req = 7;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the beginning of the requested node.
 */
static int alloc_nid_bottom_up_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 *  |----------------------+-----+                |
 *  |       expected       | req |                |
 *  +----------------------+-----+----------------+
 *
 *  |---------+                                   |
 *  |   rgn   |                                   |
 *  +---------+-----------------------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node that
 * has enough memory (in this case, nid = 0) after falling back to
 * NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_small_node_check(void)
{
	int nid_req = 1;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_2 * req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(exp_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 *  |----------------------+     +-----------+                    |
 *  |       expected       |     | requested |                    |
 *  +----------------------+-----+-----------+--------------------+
 *
 *  |-----------+                +-----------+                    |
 *  |    new    |                |  reserved |                    |
 *  +-----------+----------------+-----------+--------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node that
 * is large enough and has enough unreserved memory (in this case, nid = 0)
 * after falling back to NUMA_NO_NODE. The region count and total size get
 * updated.
 */
static int alloc_nid_bottom_up_numa_node_reserved_check(void)
{
	int nid_req = 2;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(req_node->base, req_node->size);
	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(exp_node));

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+         |
 *  |           |               requested               |         |
 *  +-----------+---------------------------------------+---------+
 *
 *  |           +------------------+-----+                        |
 *  |           |     reserved     | new |                        |
 *  +-----------+------------------+-----+------------------------+
 *
 * Expect to allocate an aligned region in the requested node that merges with
 * the existing reserved region. The total size gets updated.
 */
static int alloc_nid_bottom_up_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t total_size;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();
	total_size = size + r1.size;

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 *  |----------------------+       +-----------------------+         |
 *  |       expected       |       |       requested       |         |
 *  +----------------------+-------+-----------------------+---------+
 *
 *  |-----------+                        +----------+                |
 *  |    new    |                        | reserved |                |
 *  +-----------+------------------------+----------+----------------+
 *
 * Expect to allocate an aligned region at the beginning of the first
 * node that is large enough and has enough unreserved memory (in this case,
 * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
 * get updated.
 */
static int alloc_nid_bottom_up_numa_part_reserved_fallback_check(void)
{
	int nid_req = 4;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);
	r1.size = size;

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(exp_node));

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.
 */
static int alloc_nid_bottom_up_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), req_node_end);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *  |------------------+        +----------------------+---------+      |
 *  |     expected     |        |       previous       |requested|      |
 *  +------------------+--------+----------------------+---------+------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the first node that has enough memory.
 */
static int alloc_nid_bottom_up_numa_split_range_high_check(void)
{
	int nid_req = 3;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t exp_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	exp_node_end = region_end(req_node);
	min_addr = req_node->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), exp_node_end);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the
 * requested node ends before min_addr:
 *
 *  |    +---------------+        +-------------+---------+    |
 *  |    |   requested   |        |    node1    |  node2  |    |
 *  +----+---------------+--------+-------------+---------+----+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
static int alloc_nid_bottom_up_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * ends before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                     min_addr
 *                     |                                 max_addr
 *                     |                                 |
 *                     v                                 v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                     +                                 +
 *  |                  +-----+                                       |
 *  |                  | rgn |                                       |
 *  +------------------+-----+---------------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

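	/*
	 * The range covers nodes 2 through 5, while the requested node lies
	 * entirely below min_addr, so the nid hint cannot be honored.
	 */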
	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, min_addr);
	ASSERT_LE(region_end(new_rgn), region_end(min_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * starts after max_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *        min_addr
 *        |                                 max_addr
 *        |                                 |
 *        v                                 v
 *  |     +----------+----...----+----------+         +---------+   |
 *  |     | min node |    ...    | max node |         |requested|   |
 *  +-----+----------+----...----+----------+---------+---------+---+
 *        +                                 +
 *  |     +-----+                                                   |
 *  |     | rgn |                                                   |
 *  +-----+-----+---------------------------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_no_overlap_high_check(void)
{
	int nid_req = 7;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

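	/*
	 * Same range as in the previous test, but here the requested node
	 * lies entirely above max_addr.
	 */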
	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, min_addr);
	ASSERT_LE(region_end(new_rgn), region_end(min_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size.
 * Additionally, none of the nodes have enough memory to allocate the region:
 *
 * +-----------------------------------+
 * |                new                |
 * +-----------------------------------+
 *     |-------+-------+-------+-------+-------+-------+-------+-------|
 *     | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
 *     +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_numa_large_region_generic_check(void)
{
	int nid_req = 3;
	void *allocated_ptr = NULL;
	phys_addr_t size = MEM_SIZE / SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

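	/*
	 * The requested size is half of all memory, which is more than any
	 * single node can hold, so no candidate range can satisfy it.
	 */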
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);
	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when there are two reserved regions at the borders. The requested node
 * starts at min_addr and ends at max_addr and is the same size as the region
 * to be allocated:
 *
 *               min_addr
 *               |                       max_addr
 *               |                       |
 *               v                       v
 *  |    +-----------+-----------------------+-----------------------|
 *  |    |   node5   |       requested       |         node7         |
 *  +----+-----------+-----------------------+-----------------------+
 *               +                       +
 *  |       +----+-----------------------+----+                      |
 *  |       | r2 |          new          | r1 |                      |
 *  +-------+----+-----------------------+----+----------------------+
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_nid_numa_reserved_full_merge_generic_check(void)
{
	int nid_req = 6;
	int nid_next = nid_req + 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *next_node = &memblock.memory.regions[nid_next];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t size = req_node->size;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

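	/*
	 * Reserve r1 at the bottom of the node that follows the requested
	 * one, and r2 immediately below the requested node, so that the gap
	 * between them is exactly the size of the requested node.
	 */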
	r1.base = next_node->base;
	r1.size = SZ_128;

	r2.size = SZ_128;
	r2.base = r1.base - (size + r2.size);

	total_size = r1.size + r2.size + size;
	min_addr = r2.base + r2.size;
	max_addr = min_addr + size;

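	/* The allocation must fill the gap exactly and merge with r1 and r2 */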
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, r2.base);

	ASSERT_LE(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(req_node), region_end(new_rgn));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range,
 * where the total range can fit the region, but it is split between two nodes
 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
 * instead of requesting a specific node:
 *
 *                         min_addr
 *                         |           max_addr
 *                         |           |
 *                         v           v
 *  |      +---------------------+-----------|
 *  |      |      prev node      | next node |
 *  +------+---------------------+-----------+
 *                         +           +
 *  |----------------------+           +-----|
 *  |          r1          |           |  r2 |
 *  +----------------------+-----------+-----+
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_numa_split_all_reserved_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct memblock_region *next_node = &memblock.memory.regions[7];
	struct region r1, r2;
	phys_addr_t size = SZ_256;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

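	/*
	 * Reserve all memory except for a size-sized hole that straddles the
	 * boundary between the last two nodes, so no single node can fit the
	 * region even though the range as a whole could.
	 */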
	r2.base = next_node->base + SZ_128;
	r2.size = memblock_end_of_DRAM() - r2.base;

	r1.size = MEM_SIZE - (r2.size + size);
	r1.base = memblock_start_of_DRAM();

	min_addr = r1.base + r1.size;
	max_addr = r2.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region through
 * memblock_alloc_node() on a NUMA node with id `nid`. Expect the correct NUMA
 * node to be set for the new region.
 */
static int alloc_node_on_correct_nid(void)
{
	int nid_req = 2;
	void *allocated_ptr = NULL;
#ifdef CONFIG_NUMA
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
#endif
	phys_addr_t size = SZ_512;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	allocated_ptr = memblock_alloc_node(size, SMP_CACHE_BYTES, nid_req);

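	/*
	 * The nid member of struct memblock_region only exists when
	 * CONFIG_NUMA is set, so the node id check is guarded.
	 */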
	ASSERT_NE(allocated_ptr, NULL);
#ifdef CONFIG_NUMA
	ASSERT_EQ(nid_req, req_node->nid);
#endif

	test_pass_pop();

	return 0;
}

/* Test case wrappers for NUMA tests */
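/*
 * Each wrapper runs its scenario twice: once with the top-down allocation
 * direction and once with the bottom-up direction.
 */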
static int alloc_nid_numa_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_simple_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_simple_check();

	return 0;
}

static int alloc_nid_numa_small_node_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_small_node_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_small_node_check();

	return 0;
}

static int alloc_nid_numa_node_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_node_reserved_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_node_reserved_check();

	return 0;
}

static int alloc_nid_numa_part_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_part_reserved_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_part_reserved_check();

	return 0;
}

static int alloc_nid_numa_part_reserved_fallback_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_part_reserved_fallback_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_part_reserved_fallback_check();

	return 0;
}

static int alloc_nid_numa_split_range_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_split_range_low_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_split_range_low_check();

	return 0;
}

static int alloc_nid_numa_split_range_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_split_range_high_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_split_range_high_check();

	return 0;
}

static int alloc_nid_numa_no_overlap_split_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_no_overlap_split_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_no_overlap_split_check();

	return 0;
}

static int alloc_nid_numa_no_overlap_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_no_overlap_low_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_no_overlap_low_check();

	return 0;
}

static int alloc_nid_numa_no_overlap_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_no_overlap_high_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_no_overlap_high_check();

	return 0;
}

static int alloc_nid_numa_large_region_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_numa_large_region_generic_check);
	run_bottom_up(alloc_nid_numa_large_region_generic_check);

	return 0;
}

static int alloc_nid_numa_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_numa_reserved_full_merge_generic_check);
	run_bottom_up(alloc_nid_numa_reserved_full_merge_generic_check);

	return 0;
}

static int alloc_nid_numa_split_all_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_numa_split_all_reserved_generic_check);
	run_bottom_up(alloc_nid_numa_split_all_reserved_generic_check);

	return 0;
}

static int alloc_node_numa_on_correct_nid(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_node_on_correct_nid);
	run_bottom_up(alloc_node_on_correct_nid);

	return 0;
}

int __memblock_alloc_nid_numa_checks(void)
{
	test_print("Running %s NUMA tests...\n",
		   get_memblock_alloc_nid_name(alloc_nid_test_flags));

	alloc_nid_numa_simple_check();
	alloc_nid_numa_small_node_check();
	alloc_nid_numa_node_reserved_check();
	alloc_nid_numa_part_reserved_check();
	alloc_nid_numa_part_reserved_fallback_check();
	alloc_nid_numa_split_range_low_check();
	alloc_nid_numa_split_range_high_check();

	alloc_nid_numa_no_overlap_split_check();
	alloc_nid_numa_no_overlap_low_check();
	alloc_nid_numa_no_overlap_high_check();
	alloc_nid_numa_large_region_check();
	alloc_nid_numa_reserved_full_merge_check();
	alloc_nid_numa_split_all_reserved_check();

	alloc_node_numa_on_correct_nid();

	return 0;
}

static int memblock_alloc_nid_checks_internal(int flags)
{
	alloc_nid_test_flags = flags;

	prefix_reset();
	prefix_push(get_memblock_alloc_nid_name(flags));

	reset_memblock_attributes();
	dummy_physical_memory_init();

	memblock_alloc_nid_range_checks();
	memblock_alloc_nid_numa_checks();

	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}

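/*
 * Run the whole suite twice: the plain variant must return zeroed memory,
 * while the TEST_F_RAW variant leaves the allocated memory untouched.
 */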
int memblock_alloc_nid_checks(void)
{
	memblock_alloc_nid_checks_internal(TEST_F_NONE);
	memblock_alloc_nid_checks_internal(TEST_F_RAW);

	return 0;
}

int memblock_alloc_exact_nid_range_checks(void)
{
	alloc_nid_test_flags = (TEST_F_RAW | TEST_F_EXACT);

	memblock_alloc_nid_range_checks();

	return 0;
}