// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_nid_api.h"

static int alloc_nid_test_flags = TEST_F_NONE;

/*
 * Contains the fraction of MEM_SIZE assigned to each node, in basis point
 * units (one hundredth of 1%, i.e. 1/10000).
 */
static const unsigned int node_fractions[] = {
        2500, /* 1/4  */
         625, /* 1/16 */
        1250, /* 1/8  */
        1250, /* 1/8  */
         625, /* 1/16 */
         625, /* 1/16 */
        2500, /* 1/4  */
         625, /* 1/16 */
};
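/*
 * Example: the first entry is 2500/10000 = 1/4, so node 0 receives a
 * quarter of MEM_SIZE; the eight entries sum to 10000, i.e. the fractions
 * cover all of MEM_SIZE.
 */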
static inline const char * const get_memblock_alloc_nid_name(int flags)
{
        if (flags & TEST_F_EXACT)
                return "memblock_alloc_exact_nid_raw";
        if (flags & TEST_F_RAW)
                return "memblock_alloc_try_nid_raw";
        return "memblock_alloc_try_nid";
}
static inline void *run_memblock_alloc_nid(phys_addr_t size,
                                           phys_addr_t align,
                                           phys_addr_t min_addr,
                                           phys_addr_t max_addr, int nid)
{
        assert(!(alloc_nid_test_flags & TEST_F_EXACT) ||
               (alloc_nid_test_flags & TEST_F_RAW));
        /*
         * TEST_F_EXACT should be checked before TEST_F_RAW since
         * memblock_alloc_exact_nid_raw() performs raw allocations.
         */
        if (alloc_nid_test_flags & TEST_F_EXACT)
                return memblock_alloc_exact_nid_raw(size, align, min_addr,
                                                    max_addr, nid);
        if (alloc_nid_test_flags & TEST_F_RAW)
                return memblock_alloc_try_nid_raw(size, align, min_addr,
                                                  max_addr, nid);
        return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
}
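/*
 * Note: memblock_alloc_try_nid() zeroes the allocated memory, while the
 * *_raw() variants leave it uninitialized; assert_mem_content() takes
 * alloc_nid_test_flags so it can apply the content check that matches the
 * variant under test.
 */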
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *      +                   +
 * |    +       +-----------+      |
 * |    |       |    rgn    |      |
 * +----+-------+-----------+------+
 *      ^                   ^
 *      |                   |
 *      min_addr            max_addr
 *
 * Expect to allocate a region that ends at max_addr.
 */
static int alloc_nid_top_down_simple_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_128;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t rgn_end;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
        max_addr = min_addr + SZ_512;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);
        rgn_end = rgn->base + rgn->size;

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, max_addr - size);
        ASSERT_EQ(rgn_end, max_addr);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the end address is misaligned:
 *
 *        +                 +  +
 * |      +       +---------+  +    |
 * |      |       |   rgn   |  |    |
 * +------+-------+---------+--+----+
 *        ^                 ^  ^
 *        |                 |  |
 *        min_addr          |  max_addr
 *                          |
 *                          Aligned address
 *                          boundary
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_top_down_end_misaligned_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_128;
        phys_addr_t misalign = SZ_2;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t rgn_end;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
        max_addr = min_addr + SZ_512 + misalign;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);
        rgn_end = rgn->base + rgn->size;

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, max_addr - size - misalign);
        ASSERT_LT(rgn_end, max_addr);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
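/*
 * Worked example for the base check above: max_addr ends misalign (SZ_2)
 * bytes past an aligned boundary, so rounding the top-down candidate base
 * down to SMP_CACHE_BYTES places the region at max_addr - size - misalign,
 * leaving the region end misalign bytes short of max_addr.
 */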
/*
 * A simple test that tries to allocate a memory region, which spans over the
 * min_addr and max_addr range:
 *
 *        +               +
 * |      +---------------+       |
 * |      |      rgn      |       |
 * +------+---------------+-------+
 *        ^               ^
 *        |               |
 *        min_addr        max_addr
 *
 * Expect to allocate a region that starts at min_addr and ends at
 * max_addr, given that min_addr is aligned.
 */
static int alloc_nid_exact_address_generic_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_1K;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t rgn_end;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
        max_addr = min_addr + size;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);
        rgn_end = rgn->base + rgn->size;

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, min_addr);
        ASSERT_EQ(rgn_end, max_addr);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range:
 *
 *          +          +     +
 * |        +----------+-----+    |
 * |        |    rgn   +     |    |
 * +--------+----------+-----+----+
 *          ^          ^     ^
 *          |          |     |
 *          Aligned    |     max_addr
 *          address    |
 *          boundary   min_addr
 *
 * Expect to drop the lower limit and allocate a memory region which
 * ends at max_addr (if the address is aligned).
 */
static int alloc_nid_top_down_narrow_range_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_256;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + SZ_512;
        max_addr = min_addr + SMP_CACHE_BYTES;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, max_addr - size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
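/*
 * Note: the "drop the lower limit" fallback exercised above comes from
 * memblock_alloc_range_nid(), which retries a failed request without the
 * lower address limit before giving up entirely.
 */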
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range, with the latter being too close to the
 * beginning of the available memory:
 *
 * +-------------+
 * |     new     |
 * +-------------+
 * +       +
 * |       +              |
 * |       |              |
 * +-------+--------------+
 * ^       ^
 * |       |
 * |       max_addr
 *
 * min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_low_max_generic_check(void)
{
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_1K;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM();
        max_addr = min_addr + SMP_CACHE_BYTES;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_EQ(allocated_ptr, NULL);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, with min_addr being so close that it's next to an allocated region:
 *
 *          +                +
 * |        +--------+---------------|
 * |        |   r1   |      rgn      |
 * +--------+--------+---------------+
 *          ^                ^
 *          |                |
 *          min_addr         max_addr
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_nid_min_reserved_generic_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t r1_size = SZ_128;
        phys_addr_t r2_size = SZ_64;
        phys_addr_t total_size = r1_size + r2_size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t reserved_base;

        PREFIX_PUSH();
        setup_memblock();

        max_addr = memblock_end_of_DRAM();
        min_addr = max_addr - r2_size;
        reserved_base = min_addr - r1_size;

        memblock_reserve(reserved_base, r1_size);

        allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, total_size);
        ASSERT_EQ(rgn->base, reserved_base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
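/*
 * Note: the merge above happens inside memblock itself: a newly reserved
 * region that is physically adjacent to an existing one (same node and
 * flags) is coalesced with it by memblock_add_range(), which is why
 * memblock.reserved.cnt stays at 1.
 */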
/*
 * A test that tries to allocate a memory region within min_addr and max_addr,
 * with max_addr being so close that it's next to an allocated region:
 *
 *            +             +
 * |          +-------------+--------|
 * |          |     rgn     |   r1   |
 * +----------+-------------+--------+
 *            ^             ^
 *            |             |
 *            min_addr      max_addr
 *
 * Expect a merge of regions. Only the region size gets updated.
 */
static int alloc_nid_max_reserved_generic_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t r1_size = SZ_64;
        phys_addr_t r2_size = SZ_128;
        phys_addr_t total_size = r1_size + r2_size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        max_addr = memblock_end_of_DRAM() - r1_size;
        min_addr = max_addr - r2_size;

        memblock_reserve(max_addr, r1_size);

        allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, total_size);
        ASSERT_EQ(rgn->base, min_addr);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap big enough
 * to fit a new region:
 *
 *               +           +
 * |    +--------+   +-------+------+  |
 * |    |   r2   |   |  rgn  |  r1  |  |
 * +----+--------+---+-------+------+--+
 *               ^           ^
 *               |           |
 *               min_addr    max_addr
 *
 * Expect to merge the new region with r1. The r2 region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_top_down_reserved_with_space_check(void)
{
        struct memblock_region *rgn1 = &memblock.reserved.regions[1];
        struct memblock_region *rgn2 = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        struct region r1, r2;
        phys_addr_t r3_size = SZ_64;
        phys_addr_t gap_size = SMP_CACHE_BYTES;
        phys_addr_t total_size;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_memblock();

        r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
        r1.size = SMP_CACHE_BYTES;

        r2.size = SZ_128;
        r2.base = r1.base - (r3_size + gap_size + r2.size);

        total_size = r1.size + r2.size + r3_size;
        min_addr = r2.base + r2.size;
        max_addr = r1.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

        ASSERT_EQ(rgn1->size, r1.size + r3_size);
        ASSERT_EQ(rgn1->base, max_addr - r3_size);

        ASSERT_EQ(rgn2->size, r2.size);
        ASSERT_EQ(rgn2->base, r2.base);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap of a size
 * equal to the size of the new region:
 *
 *                +        +
 * |     +--------+--------+--------+     |
 * |     |   r2   |   r3   |   r1   |     |
 * +-----+--------+--------+--------+-----+
 *                ^        ^
 *                |        |
 *                min_addr max_addr
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_nid_reserved_full_merge_generic_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        struct region r1, r2;
        phys_addr_t r3_size = SZ_64;
        phys_addr_t total_size;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_memblock();

        r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
        r1.size = SMP_CACHE_BYTES;

        r2.size = SZ_128;
        r2.base = r1.base - (r3_size + r2.size);

        total_size = r1.size + r2.size + r3_size;
        min_addr = r2.base + r2.size;
        max_addr = r1.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, total_size);
        ASSERT_EQ(rgn->base, r2.base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap that can't
 * fit a new region:
 *
 *                      +    +
 * |  +----------+------+    +------+   |
 * |  |    r3    |  r2  |    |  r1  |   |
 * +--+----------+------+----+------+---+
 *                      ^    ^
 *                      |    |
 *                      |    max_addr
 *
 *                      min_addr
 *
 * Expect to merge the new region with r2. The r1 region does not get
 * updated. The total size counter gets updated.
 */
static int alloc_nid_top_down_reserved_no_space_check(void)
{
        struct memblock_region *rgn1 = &memblock.reserved.regions[1];
        struct memblock_region *rgn2 = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        struct region r1, r2;
        phys_addr_t r3_size = SZ_256;
        phys_addr_t gap_size = SMP_CACHE_BYTES;
        phys_addr_t total_size;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_memblock();

        r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
        r1.size = SMP_CACHE_BYTES;

        r2.size = SZ_128;
        r2.base = r1.base - (r2.size + gap_size);

        total_size = r1.size + r2.size + r3_size;
        min_addr = r2.base + r2.size;
        max_addr = r1.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

        ASSERT_EQ(rgn1->size, r1.size);
        ASSERT_EQ(rgn1->base, r1.base);

        ASSERT_EQ(rgn2->size, r2.size + r3_size);
        ASSERT_EQ(rgn2->base, r2.base - r3_size);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * but it's too narrow and everything else is reserved:
 *
 *              +-----------+
 *              |    new    |
 *              +-----------+
 *                +      +
 * |--------------+      +----------|
 * |      r2      |      |    r1    |
 * +--------------+------+----------+
 *                ^      ^
 *                |      |
 *                |      max_addr
 *
 *                min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_reserved_all_generic_check(void)
{
        void *allocated_ptr = NULL;
        struct region r1, r2;
        phys_addr_t r3_size = SZ_256;
        phys_addr_t gap_size = SMP_CACHE_BYTES;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_memblock();

        r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
        r1.size = SMP_CACHE_BYTES;

        r2.size = MEM_SIZE - (r1.size + gap_size);
        r2.base = memblock_start_of_DRAM();

        min_addr = r2.base + r2.size;
        max_addr = r1.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_EQ(allocated_ptr, NULL);

        test_pass_pop();

        return 0;
}
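/*
 * Note: this is the fully-exhausted case: r2 and r1 cover everything except
 * a gap of SMP_CACHE_BYTES, so even after the lower limit is dropped there
 * is no free range that can hold r3_size bytes and the allocation fails.
 */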
/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that ends at the end of the memory.
 */
static int alloc_nid_top_down_cap_max_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_256;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_end_of_DRAM() - SZ_1K;
        max_addr = memblock_end_of_DRAM() + SZ_256;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region that ends at the end of the memory.
 */
static int alloc_nid_top_down_cap_min_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_1K;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() - SZ_256;
        max_addr = memblock_end_of_DRAM();

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *      +                       +
 * |    +-----------+           |      |
 * |    |    rgn    |           |      |
 * +----+-----------+-----------+------+
 *      ^                       ^
 *      |                       |
 *      min_addr                max_addr
 *
 * Expect to allocate a region that ends before max_addr.
 */
static int alloc_nid_bottom_up_simple_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_128;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t rgn_end;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
        max_addr = min_addr + SZ_512;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);
        rgn_end = rgn->base + rgn->size;

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, min_addr);
        ASSERT_LT(rgn_end, max_addr);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the start address is misaligned:
 *
 *      +                     +
 * |    +   +-----------+     +     |
 * |    |   |    rgn    |     |     |
 * +-----+---+-----------+-----+-----+
 *      ^   ^                 ^
 *      |   |                 |
 *      |   Aligned address   max_addr
 *      |   boundary
 *      min_addr
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_bottom_up_start_misaligned_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_128;
        phys_addr_t misalign = SZ_2;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t rgn_end;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + misalign;
        max_addr = min_addr + SZ_512;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);
        rgn_end = rgn->base + rgn->size;

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
        ASSERT_LT(rgn_end, max_addr);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
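/*
 * Worked example for the base check above: min_addr sits misalign (SZ_2)
 * bytes past the (aligned) start of memory, so rounding the bottom-up
 * candidate base up to SMP_CACHE_BYTES advances it by
 * SMP_CACHE_BYTES - misalign, matching the asserted base address.
 */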
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range:
 *
 *                    +    +
 * |---------+        +    +      |
 * |   rgn   |        |    |      |
 * +---------+---------+----+------+
 *                    ^    ^
 *                    |    |
 *                    |    max_addr
 *
 *                    min_addr
 *
 * Expect to drop the lower limit and allocate a memory region which
 * starts at the beginning of the available memory.
 */
static int alloc_nid_bottom_up_narrow_range_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_256;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + SZ_512;
        max_addr = min_addr + SMP_CACHE_BYTES;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap big enough
 * to fit a new region:
 *
 *               +           +
 * |    +--------+-------+   +------+  |
 * |    |   r2   |  rgn  |   |  r1  |  |
 * +----+--------+-------+---+------+--+
 *               ^           ^
 *               |           |
 *               min_addr    max_addr
 *
 * Expect to merge the new region with r2. The r1 region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_bottom_up_reserved_with_space_check(void)
{
        struct memblock_region *rgn1 = &memblock.reserved.regions[1];
        struct memblock_region *rgn2 = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        struct region r1, r2;
        phys_addr_t r3_size = SZ_64;
        phys_addr_t gap_size = SMP_CACHE_BYTES;
        phys_addr_t total_size;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_memblock();

        r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
        r1.size = SMP_CACHE_BYTES;

        r2.size = SZ_128;
        r2.base = r1.base - (r3_size + gap_size + r2.size);

        total_size = r1.size + r2.size + r3_size;
        min_addr = r2.base + r2.size;
        max_addr = r1.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

        ASSERT_EQ(rgn1->size, r1.size);
        ASSERT_EQ(rgn1->base, max_addr);

        ASSERT_EQ(rgn2->size, r2.size + r3_size);
        ASSERT_EQ(rgn2->base, r2.base);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap that can't
 * fit a new region:
 *
 *                        +   +
 * |----------+    +------+   +----+  |
 * |    r3    |    |  r2  |   | r1 |  |
 * +----------+----+------+---+----+--+
 *                        ^   ^
 *                        |   |
 *                        |   max_addr
 *
 *                        min_addr
 *
 * Expect to drop the lower limit and allocate memory at the beginning of
 * the available memory. The region counter and total size fields get
 * updated. Other regions are not modified.
 */
static int alloc_nid_bottom_up_reserved_no_space_check(void)
{
        struct memblock_region *rgn1 = &memblock.reserved.regions[2];
        struct memblock_region *rgn2 = &memblock.reserved.regions[1];
        struct memblock_region *rgn3 = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        struct region r1, r2;
        phys_addr_t r3_size = SZ_256;
        phys_addr_t gap_size = SMP_CACHE_BYTES;
        phys_addr_t total_size;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_memblock();

        r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
        r1.size = SMP_CACHE_BYTES;

        r2.size = SZ_128;
        r2.base = r1.base - (r2.size + gap_size);

        total_size = r1.size + r2.size + r3_size;
        min_addr = r2.base + r2.size;
        max_addr = r1.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

        ASSERT_EQ(rgn3->size, r3_size);
        ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());

        ASSERT_EQ(rgn2->size, r2.size);
        ASSERT_EQ(rgn2->base, r2.base);

        ASSERT_EQ(rgn1->size, r1.size);
        ASSERT_EQ(rgn1->base, r1.base);

        ASSERT_EQ(memblock.reserved.cnt, 3);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that starts at min_addr.
 */
static int alloc_nid_bottom_up_cap_max_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_256;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM() + SZ_1K;
        max_addr = memblock_end_of_DRAM() + SZ_256;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, min_addr);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region at the beginning of the available memory.
 */
static int alloc_nid_bottom_up_cap_min_check(void)
{
        struct memblock_region *rgn = &memblock.reserved.regions[0];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_1K;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_memblock();

        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM() - SZ_256;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(rgn->size, size);
        ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/* Test case wrappers for range tests */
static int alloc_nid_simple_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_simple_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_simple_check();

        return 0;
}
static int alloc_nid_misaligned_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_end_misaligned_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_start_misaligned_check();

        return 0;
}

static int alloc_nid_narrow_range_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_narrow_range_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_narrow_range_check();

        return 0;
}

static int alloc_nid_reserved_with_space_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_reserved_with_space_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_reserved_with_space_check();

        return 0;
}

static int alloc_nid_reserved_no_space_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_reserved_no_space_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_reserved_no_space_check();

        return 0;
}

static int alloc_nid_cap_max_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_cap_max_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_cap_max_check();

        return 0;
}

static int alloc_nid_cap_min_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_cap_min_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_cap_min_check();

        return 0;
}

static int alloc_nid_min_reserved_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_min_reserved_generic_check);
        run_bottom_up(alloc_nid_min_reserved_generic_check);

        return 0;
}
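/*
 * Note: direction-agnostic tests have a single *_generic_check() body;
 * run_top_down() and run_bottom_up() are test-harness helpers that switch
 * the memblock allocation direction before invoking it.
 */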
static int alloc_nid_max_reserved_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_max_reserved_generic_check);
        run_bottom_up(alloc_nid_max_reserved_generic_check);

        return 0;
}

static int alloc_nid_exact_address_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_exact_address_generic_check);
        run_bottom_up(alloc_nid_exact_address_generic_check);

        return 0;
}

static int alloc_nid_reserved_full_merge_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_reserved_full_merge_generic_check);
        run_bottom_up(alloc_nid_reserved_full_merge_generic_check);

        return 0;
}

static int alloc_nid_reserved_all_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_reserved_all_generic_check);
        run_bottom_up(alloc_nid_reserved_all_generic_check);

        return 0;
}

static int alloc_nid_low_max_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_low_max_generic_check);
        run_bottom_up(alloc_nid_low_max_generic_check);

        return 0;
}
static int memblock_alloc_nid_range_checks(void)
{
        test_print("Running %s range tests...\n",
                   get_memblock_alloc_nid_name(alloc_nid_test_flags));

        alloc_nid_simple_check();
        alloc_nid_misaligned_check();
        alloc_nid_narrow_range_check();
        alloc_nid_reserved_with_space_check();
        alloc_nid_reserved_no_space_check();
        alloc_nid_cap_max_check();
        alloc_nid_cap_min_check();

        alloc_nid_min_reserved_check();
        alloc_nid_max_reserved_check();
        alloc_nid_exact_address_check();
        alloc_nid_reserved_full_merge_check();
        alloc_nid_reserved_all_check();
        alloc_nid_low_max_check();

        return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the end of the requested node.
 */
static int alloc_nid_top_down_numa_simple_check(void)
{
        int nid_req = 3;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        ASSERT_LE(SZ_4, req_node->size);
        size = req_node->size / SZ_4;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
        ASSERT_LE(req_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
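/*
 * In the NUMA tests below, setup_numa_memblock() divides the simulated
 * memory into NUMA_NODES contiguous nodes sized per node_fractions, so
 * memblock.memory.regions[nid] is node nid's memory region.
 */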
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 * |   +-----+          +------------------+     |
 * |   | req |          |     expected     |     |
 * +---+-----+----------+------------------+-----+
 *
 * |                             +---------+     |
 * |                             |   rgn   |     |
 * +-----------------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that has
 * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_small_node_check(void)
{
        int nid_req = 1;
        int nid_exp = 6;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        size = SZ_2 * req_node->size;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
        ASSERT_LE(exp_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
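/*
 * Note: the fallback exercised above comes from memblock_alloc_range_nid(),
 * which retries the search across all nodes when the requested node cannot
 * satisfy the allocation; the exact-nid variant skips this fallback.
 */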
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 * |              +---------+            +------------------+     |
 * |              |requested|            |     expected     |     |
 * +--------------+---------+------------+------------------+-----+
 *
 * |              +---------+                     +---------+     |
 * |              | reserved|                     |   new   |     |
 * +--------------+---------+---------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case, nid = 6)
 * after falling back to NUMA_NO_NODE. The region count and total size get
 * updated.
 */
static int alloc_nid_top_down_numa_node_reserved_check(void)
{
        int nid_req = 2;
        int nid_exp = 6;
        struct memblock_region *new_rgn = &memblock.reserved.regions[1];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        size = req_node->size;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        memblock_reserve(req_node->base, req_node->size);
        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
        ASSERT_LE(exp_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 * |           +---------------------------------------+          |
 * |           |               requested               |          |
 * +-----------+---------------------------------------+----------+
 *
 * |           +------------------+              +-----+          |
 * |           |     reserved     |              | new |          |
 * +-----------+------------------+--------------+-----+----------+
 *
 * Expect to allocate an aligned region at the end of the requested node. The
 * region count and total size get updated.
 */
static int alloc_nid_top_down_numa_part_reserved_check(void)
{
        int nid_req = 4;
        struct memblock_region *new_rgn = &memblock.reserved.regions[1];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        struct region r1;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        ASSERT_LE(SZ_8, req_node->size);
        r1.base = req_node->base;
        r1.size = req_node->size / SZ_2;
        size = r1.size / SZ_4;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        memblock_reserve(r1.base, r1.size);
        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
        ASSERT_LE(req_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 * |           +-----------------------+         +----------------------|
 * |           |       requested       |         |       expected       |
 * +-----------+-----------------------+---------+----------------------+
 *
 * |                 +----------+                           +-----------|
 * |                 | reserved |                           |    new    |
 * +-----------------+----------+---------------------------+-----------+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case,
 * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
 * and total size get updated.
 */
static int alloc_nid_top_down_numa_part_reserved_fallback_check(void)
{
        int nid_req = 4;
        int nid_exp = NUMA_NODES - 1;
        struct memblock_region *new_rgn = &memblock.reserved.regions[1];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        struct region r1;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        ASSERT_LE(SZ_4, req_node->size);
        size = req_node->size / SZ_2;
        r1.base = req_node->base + (size / SZ_2);
        r1.size = size;

        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        memblock_reserve(r1.base, r1.size);
        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
        ASSERT_LE(exp_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                               min_addr
 *                               |           max_addr
 *                               |           |
 *                               v           v
 * |           +-----------------------+-----------+              |
 * |           |       requested       |   node3   |              |
 * +-----------+-----------------------+-----------+--------------+
 *                               +           +
 * |                       +-----------+                          |
 * |                       |    rgn    |                          |
 * +-----------------------+-----------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_nid_top_down_numa_split_range_low_check(void)
{
        int nid_req = 2;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_512;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t req_node_end;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        req_node_end = region_end(req_node);
        min_addr = req_node_end - SZ_256;
        max_addr = min_addr + size;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, req_node_end - size);
        ASSERT_LE(req_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
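/*
 * Worked example for the test above: min_addr is SZ_256 before the end of
 * the requested node and max_addr is SZ_256 past it, so only SZ_256 of the
 * range lies in the node. Dropping the lower limit lets the SZ_512 region
 * fit entirely in the requested node, ending exactly at the node boundary.
 */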
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *                             min_addr
 *                             |         max_addr
 *                             |         |
 *                             v         v
 * |      +--------------------------+---------+                |
 * |      |         expected         |requested|                |
 * +------+--------------------------+---------+----------------+
 *                             +         +
 * |                       +---------+                          |
 * |                       |   rgn   |                          |
 * +-----------------------+---------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that
 * ends at the end of the first node that overlaps with the range.
 */
static int alloc_nid_top_down_numa_split_range_high_check(void)
{
        int nid_req = 3;
        int nid_exp = nid_req - 1;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_512;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t exp_node_end;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        exp_node_end = region_end(exp_node);
        min_addr = exp_node_end - SZ_256;
        max_addr = min_addr + size;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, exp_node_end - size);
        ASSERT_LE(exp_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the
 * requested node ends before min_addr:
 *
 *                                          min_addr
 *                                          |     max_addr
 *                                          |     |
 *                                          v     v
 * |    +---------------+        +-------------+---------+          |
 * |    |   requested   |        |    node1    |  node2  |          |
 * +----+---------------+--------+-------------+---------+----------+
 *                                          +     +
 * |          +---------+                                           |
 * |          |   rgn   |                                           |
 * +----------+---------+-------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_nid_top_down_numa_no_overlap_split_check(void)
{
        int nid_req = 2;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *node2 = &memblock.memory.regions[6];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        size = SZ_512;
        min_addr = node2->base - SZ_256;
        max_addr = min_addr + size;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
        ASSERT_LE(req_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested
 * node ends before min_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *                         min_addr
 *                         |                        max_addr
 *                         |                        |
 *                         v                        v
 * |-----------+           +----------+----...----+----------+      |
 * | requested |           | min node |    ...    | max node |      |
 * +-----------+-----------+----------+----...----+----------+------+
 *                         +                        +
 * |                                                   +-----+      |
 * |                                                   | rgn |      |
 * +---------------------------------------------------+-----+------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_no_overlap_low_check(void)
{
        int nid_req = 0;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *min_node = &memblock.memory.regions[2];
        struct memblock_region *max_node = &memblock.memory.regions[5];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_64;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        min_addr = min_node->base;
        max_addr = region_end(max_node);

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, max_addr - size);
        ASSERT_LE(max_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested
 * node starts after max_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *       min_addr
 *       |                          max_addr
 *       |                          |
 *       v                          v
 * |     +----------+----...----+----------+        +-----------+   |
 * |     | min node |    ...    | max node |        | requested |   |
 * +-----+----------+----...----+----------+--------+-----------+---+
 *       +                          +
 * |                                +-----+                          |
 * |                                | rgn |                          |
 * +--------------------------------+-----+--------------------------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_no_overlap_high_check(void)
{
        int nid_req = 7;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *min_node = &memblock.memory.regions[2];
        struct memblock_region *max_node = &memblock.memory.regions[5];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_64;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        min_addr = min_node->base;
        max_addr = region_end(max_node);

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, max_addr - size);
        ASSERT_LE(max_node->base, new_rgn->base);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the beginning of the requested
 * node.
 */
static int alloc_nid_bottom_up_numa_simple_check(void)
{
        int nid_req = 3;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        ASSERT_LE(SZ_4, req_node->size);
        size = req_node->size / SZ_4;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, req_node->base);
        ASSERT_LE(region_end(new_rgn), region_end(req_node));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 * |----------------------+-----+                |
 * |       expected       | req |                |
 * +----------------------+-----+----------------+
 *
 * |---------+                                   |
 * |   rgn   |                                   |
 * +---------+-----------------------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node
 * that has enough memory (in this case, nid = 0) after falling back to
 * NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_small_node_check(void)
{
        int nid_req = 1;
        int nid_exp = 0;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        size = SZ_2 * req_node->size;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, exp_node->base);
        ASSERT_LE(region_end(new_rgn), region_end(exp_node));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 * |----------------------+     +-----------+                    |
 * |       expected       |     | requested |                    |
 * +----------------------+-----+-----------+--------------------+
 *
 * |-----------+                +-----------+                    |
 * |    new    |                | reserved  |                    |
 * +-----------+----------------+-----------+--------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node
 * that is large enough and has enough unreserved memory (in this case,
 * nid = 0) after falling back to NUMA_NO_NODE. The region count and total
 * size get updated.
 */
static int alloc_nid_bottom_up_numa_node_reserved_check(void)
{
        int nid_req = 2;
        int nid_exp = 0;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        size = req_node->size;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        memblock_reserve(req_node->base, req_node->size);
        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, exp_node->base);
        ASSERT_LE(region_end(new_rgn), region_end(exp_node));

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

        test_pass_pop();

        return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 * |           +---------------------------------------+         |
 * |           |               requested               |         |
 * +-----------+---------------------------------------+---------+
 *
 * |           +------------------+-----+                        |
 * |           |     reserved     | new |                        |
 * +-----------+------------------+-----+------------------------+
 *
 * Expect to allocate an aligned region in the requested node that merges with
 * the existing reserved region. The total size gets updated.
 */
static int alloc_nid_bottom_up_numa_part_reserved_check(void)
{
        int nid_req = 4;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        struct region r1;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t total_size;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        ASSERT_LE(SZ_8, req_node->size);
        r1.base = req_node->base;
        r1.size = req_node->size / SZ_2;
        size = r1.size / SZ_4;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();
        total_size = size + r1.size;

        memblock_reserve(r1.base, r1.size);
        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, total_size);
        ASSERT_EQ(new_rgn->base, req_node->base);
        ASSERT_LE(region_end(new_rgn), region_end(req_node));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}
1988 * A test that tries to allocate a memory region in a specific NUMA node that
1989 * is partially reserved and does not have enough contiguous memory for the
1990 * allocated region:
1992 * |----------------------+ +-----------------------+ |
1993 * | expected | | requested | |
1994 * +----------------------+-------+-----------------------+---------+
1996 * |-----------+ +----------+ |
1997 * | new | | reserved | |
1998 * +-----------+------------------------+----------+----------------+
2000 * Expect to allocate an aligned region at the beginning of the first
2001 * node that is large enough and has enough unreserved memory (in this case,
2002 * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
2003 * get updated.
static int alloc_nid_bottom_up_numa_part_reserved_fallback_check(void)
{
	int nid_req = 4;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);
	r1.size = size;
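
	/*
	 * r1 covers the middle of the requested node, so the free chunks
	 * left at either end are only half the requested size and the
	 * allocation must fall back to another node.
	 */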
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);
	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(exp_node));

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                min_addr
 *                                |           max_addr
 *                                |           |
 *                                v           v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                +           +
 *  |           +-----------+                                      |
 *  |           |    rgn    |                                      |
 *  +-----------+-----------+--------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.
 */
static int alloc_nid_bottom_up_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);
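
	/*
	 * Make the range straddle the upper boundary of the requested node:
	 * min_addr lies SZ_256 before req_node's end and max_addr SZ_256
	 * past it.
	 */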
	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), req_node_end);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *                                              min_addr
 *                                              |         max_addr
 *                                              |         |
 *                                              v         v
 *  |------------------+        +----------------------+---------+      |
 *  |     expected     |        |       previous       |requested|      |
 *  +------------------+--------+----------------------+---------+------+
 *                                              +         +
 *  |---------+                                                         |
 *  |   rgn   |                                                         |
 *  +---------+---------------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the first node that has enough memory.
 */
static int alloc_nid_bottom_up_numa_split_range_high_check(void)
{
	int nid_req = 3;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t exp_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);
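
	/*
	 * Straddle the lower boundary of the requested node: min_addr lies
	 * SZ_256 before req_node->base and max_addr SZ_256 past it.
	 */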
	exp_node_end = region_end(req_node);
	min_addr = req_node->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), exp_node_end);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the
 * requested node ends before min_addr:
 *
 *                                        min_addr
 *                                        |           max_addr
 *                                        |           |
 *                                        v           v
 *  |    +---------------+        +-------------+---------+         |
 *  |    |   requested   |        |    node1    |  node2  |         |
 *  +----+---------------+--------+-------------+---------+---------+
 *                                        +           +
 *  |    +---------+                                                |
 *  |    |   rgn   |                                                |
 *  +----+---------+------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
static int alloc_nid_bottom_up_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);
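
	/*
	 * Pick a range that straddles the lower boundary of memory region 6
	 * (the diagram's "node2"), well past the end of the requested node.
	 */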
	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested
 * node ends before min_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *                          min_addr
 *                          |                                 max_addr
 *                          |                                 |
 *                          v                                 v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                          +                                 +
 *  |                       +-----+                                  |
 *  |                       | rgn |                                  |
 *  +-----------------------+-----+----------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);
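
	/*
	 * The requested node (nid 0) ends before min_addr, so the
	 * allocation must fall back to NUMA_NO_NODE within the range.
	 */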
	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, min_addr);
	ASSERT_LE(region_end(new_rgn), region_end(min_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested
 * node starts after max_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *        min_addr
 *        |                                 max_addr
 *        |                                 |
 *        v                                 v
 *  |     +----------+----...----+----------+         +---------+   |
 *  |     | min node |    ...    | max node |         |requested|   |
 *  +-----+----------+----...----+----------+---------+---------+---+
 *        +                                 +
 *  |     +-----+                                                   |
 *  |     | rgn |                                                   |
 *  +-----+-----+---------------------------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_no_overlap_high_check(void)
{
	int nid_req = 7;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);
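
	/*
	 * The requested node (nid 7) starts after max_addr, again forcing
	 * the fallback to NUMA_NO_NODE within the range.
	 */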
	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, min_addr);
	ASSERT_LE(region_end(new_rgn), region_end(min_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size.
 * Additionally, none of the nodes have enough memory to allocate the region:
 *
 *  +-----------------------------------+
 *  |                new                |
 *  +-----------------------------------+
 *  |-------+-------+-------+-------+-------+-------+-------+-------|
 *  | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
 *  +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_numa_large_region_generic_check(void)
{
	int nid_req = 3;
	void *allocated_ptr = NULL;
	phys_addr_t size = MEM_SIZE / SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);
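
	/*
	 * The largest node holds only a quarter of MEM_SIZE (see
	 * node_fractions), so a MEM_SIZE / 2 request cannot fit in any
	 * single node.
	 */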
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);
	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range when
 * there are two reserved regions at the borders. The requested node starts at
 * min_addr and ends at max_addr and is the same size as the region to be
 * allocated:
 *
 *                   min_addr
 *                   |                       max_addr
 *                   |                       |
 *                   v                       v
 *  |      +-----------+-----------------------+-----------------------|
 *  |      |   node5   |       requested       |         node7         |
 *  +------+-----------+-----------------------+-----------------------+
 *                     +                       +
 *  |             +----+-----------------------+----+                  |
 *  |             | r2 |          new          | r1 |                  |
 *  +-------------+----+-----------------------+----+------------------+
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_nid_numa_reserved_full_merge_generic_check(void)
{
	int nid_req = 6;
	int nid_next = nid_req + 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *next_node = &memblock.memory.regions[nid_next];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t size = req_node->size;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	r1.base = next_node->base;
	r1.size = SZ_128;

	r2.size = SZ_128;
	r2.base = r1.base - (size + r2.size);
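
	/*
	 * r1 starts at the next node's base and r2 sits size + r2.size
	 * below it, so exactly `size` bytes (the whole requested node)
	 * remain free between the two reserved regions.
	 */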
	total_size = r1.size + r2.size + size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, total_size);
	ASSERT_EQ(new_rgn->base, r2.base);

	ASSERT_LE(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(req_node), region_end(new_rgn));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * where the total range can fit the region, but it is split between two nodes
 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
 * instead of requesting a specific node:
 *
 *                         +-----------+
 *                         |    new    |
 *                         +-----------+
 *  |      +---------------------+-----------|
 *  |      |      prev node      | next node |
 *  +------+---------------------+-----------+
 *                         +           +
 *  |----------------------+           +-----|
 *  |          r1          |           | r2  |
 *  +----------------------+-----------+-----+
 *                         ^           ^
 *                         |           |
 *                         |           max_addr
 *                         |
 *                         min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_numa_split_all_reserved_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct memblock_region *next_node = &memblock.memory.regions[7];
	struct region r1, r2;
	phys_addr_t size = SZ_256;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	r2.base = next_node->base + SZ_128;
	r2.size = memblock_end_of_DRAM() - r2.base;

	r1.size = MEM_SIZE - (r2.size + size);
	r1.base = memblock_start_of_DRAM();
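
	/*
	 * r1 and r2 leave exactly `size` bytes free, but the gap straddles
	 * the boundary between the last two nodes, so no single node holds
	 * a free chunk large enough for the request.
	 */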
	min_addr = r1.base + r1.size;
	max_addr = r2.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region through
 * memblock_alloc_node() on a NUMA node with id `nid`. Expect the new
 * region to have the correct NUMA node set.
 */
static int alloc_node_on_correct_nid(void)
{
	int nid_req = 2;
	void *allocated_ptr = NULL;
#ifdef CONFIG_NUMA
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
#endif
	phys_addr_t size = SZ_512;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);
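
	/*
	 * struct memblock_region only carries a nid when CONFIG_NUMA is
	 * set, so the node check below is guarded accordingly.
	 */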
	allocated_ptr = memblock_alloc_node(size, SMP_CACHE_BYTES, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
#ifdef CONFIG_NUMA
	ASSERT_EQ(nid_req, req_node->nid);
#endif

	test_pass_pop();

	return 0;
}

/* Test case wrappers for NUMA tests */
static int alloc_nid_numa_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_simple_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_simple_check();

	return 0;
}

static int alloc_nid_numa_small_node_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_small_node_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_small_node_check();

	return 0;
}

static int alloc_nid_numa_node_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_node_reserved_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_node_reserved_check();

	return 0;
}

static int alloc_nid_numa_part_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_part_reserved_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_part_reserved_check();

	return 0;
}

static int alloc_nid_numa_part_reserved_fallback_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_part_reserved_fallback_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_part_reserved_fallback_check();

	return 0;
}

static int alloc_nid_numa_split_range_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_split_range_low_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_split_range_low_check();

	return 0;
}

static int alloc_nid_numa_split_range_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_split_range_high_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_split_range_high_check();

	return 0;
}

static int alloc_nid_numa_no_overlap_split_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_no_overlap_split_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_no_overlap_split_check();

	return 0;
}

static int alloc_nid_numa_no_overlap_low_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_no_overlap_low_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_no_overlap_low_check();

	return 0;
}

static int alloc_nid_numa_no_overlap_high_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_numa_no_overlap_high_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_numa_no_overlap_high_check();

	return 0;
}

static int alloc_nid_numa_large_region_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_numa_large_region_generic_check);
	run_bottom_up(alloc_nid_numa_large_region_generic_check);

	return 0;
}

static int alloc_nid_numa_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_numa_reserved_full_merge_generic_check);
	run_bottom_up(alloc_nid_numa_reserved_full_merge_generic_check);

	return 0;
}

static int alloc_nid_numa_split_all_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_numa_split_all_reserved_generic_check);
	run_bottom_up(alloc_nid_numa_split_all_reserved_generic_check);

	return 0;
}

static int alloc_node_numa_on_correct_nid(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_node_on_correct_nid);
	run_bottom_up(alloc_node_on_correct_nid);

	return 0;
}

int __memblock_alloc_nid_numa_checks(void)
{
	test_print("Running %s NUMA tests...\n",
		   get_memblock_alloc_nid_name(alloc_nid_test_flags));

	alloc_nid_numa_simple_check();
	alloc_nid_numa_small_node_check();
	alloc_nid_numa_node_reserved_check();
	alloc_nid_numa_part_reserved_check();
	alloc_nid_numa_part_reserved_fallback_check();
	alloc_nid_numa_split_range_low_check();
	alloc_nid_numa_split_range_high_check();

	alloc_nid_numa_no_overlap_split_check();
	alloc_nid_numa_no_overlap_low_check();
	alloc_nid_numa_no_overlap_high_check();
	alloc_nid_numa_large_region_check();
	alloc_nid_numa_reserved_full_merge_check();
	alloc_nid_numa_split_all_reserved_check();

	alloc_node_numa_on_correct_nid();

	return 0;
}
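
/*
 * Run both the range and the NUMA suites with the given test flags; the
 * flags select which allocator variant run_memblock_alloc_nid() calls.
 */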
static int memblock_alloc_nid_checks_internal(int flags)
{
	alloc_nid_test_flags = flags;

	prefix_reset();
	prefix_push(get_memblock_alloc_nid_name(flags));

	reset_memblock_attributes();
	dummy_physical_memory_init();

	memblock_alloc_nid_range_checks();
	memblock_alloc_nid_numa_checks();

	dummy_physical_memory_cleanup();

	prefix_pop();

	return 0;
}

int memblock_alloc_nid_checks(void)
{
	memblock_alloc_nid_checks_internal(TEST_F_NONE);
	memblock_alloc_nid_checks_internal(TEST_F_RAW);

	return 0;
}
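
/*
 * The exact-nid flavor implies raw allocations (see the assertion in
 * run_memblock_alloc_nid()), so TEST_F_RAW is set alongside TEST_F_EXACT
 * and only the range checks are run here.
 */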
int memblock_alloc_exact_nid_range_checks(void)
{
	alloc_nid_test_flags = (TEST_F_RAW | TEST_F_EXACT);

	memblock_alloc_nid_range_checks();

	return 0;
}