mm/memblock.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Procedures for maintaining information about logical memory blocks.
5 * Peter Bergner, IBM Corp. June 2001.
6 * Copyright (C) 2001 Peter Bergner.
7 */
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/init.h>
12 #include <linux/bitops.h>
13 #include <linux/poison.h>
14 #include <linux/pfn.h>
15 #include <linux/debugfs.h>
16 #include <linux/kmemleak.h>
17 #include <linux/seq_file.h>
18 #include <linux/memblock.h>
20 #include <asm/sections.h>
21 #include <linux/io.h>
23 #include "internal.h"
25 #define INIT_MEMBLOCK_REGIONS 128
26 #define INIT_PHYSMEM_REGIONS 4
28 #ifndef INIT_MEMBLOCK_RESERVED_REGIONS
29 # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
30 #endif
32 /**
33 * DOC: memblock overview
35 * Memblock is a method of managing memory regions during the early
36 * boot period when the usual kernel memory allocators are not up and
37 * running.
39 * Memblock views the system memory as collections of contiguous
40 * regions. There are several types of these collections:
42 * * ``memory`` - describes the physical memory available to the
43 * kernel; this may differ from the actual physical memory installed
44 * in the system, for instance when the memory is restricted with
45 * ``mem=`` command line parameter
46 * * ``reserved`` - describes the regions that were allocated
47 * * ``physmem`` - describes the actual physical memory available during
48 * boot regardless of the possible restrictions and memory hot(un)plug;
49 * the ``physmem`` type is only available on some architectures.
51 * Each region is represented by :c:type:`struct memblock_region` that
52 * defines the region extents, its attributes and NUMA node id on NUMA
53 * systems. Every memory type is described by the :c:type:`struct
54 * memblock_type` which contains an array of memory regions along with
55 * the allocator metadata. The "memory" and "reserved" types are nicely
56 * wrapped with :c:type:`struct memblock`. This structure is statically
57 * initialized at build time. The region arrays are initially sized to
58 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
59 * for "reserved". The region array for "physmem" is initially sized to
60 * %INIT_PHYSMEM_REGIONS.
61 * The memblock_allow_resize() enables automatic resizing of the region
62 * arrays during addition of new regions. This feature should be used
63 * with care so that memory allocated for the region array will not
64 * overlap with areas that should be reserved, for example initrd.
66 * The early architecture setup should tell memblock what the physical
67 * memory layout is by using memblock_add() or memblock_add_node()
68 * functions. The first function does not assign the region to a NUMA
69 * node and it is appropriate for UMA systems. Yet, it is possible to
70 * use it on NUMA systems as well and assign the region to a NUMA node
71 * later in the setup process using memblock_set_node(). The
72 * memblock_add_node() performs such an assignment directly.
74 * Once memblock is setup the memory can be allocated using one of the
75 * API variants:
77 * * memblock_phys_alloc*() - these functions return the **physical**
78 * address of the allocated memory
79 * * memblock_alloc*() - these functions return the **virtual** address
80 * of the allocated memory.
82 * Note that both API variants use implicit assumptions about allowed
83 * memory ranges and the fallback methods. Consult the documentation
84 * of memblock_alloc_internal() and memblock_alloc_range_nid()
85 * functions for more elaborate description.
87 * As the system boot progresses, the architecture specific mem_init()
88 * function frees all the memory to the buddy page allocator.
90 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
91 * memblock data structures (except "physmem") will be discarded after the
92 * system initialization completes.
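To make the flow described above concrete, here is a minimal early-boot sketch (hypothetical platform code: the RAM range, the table size and the panic message are illustrative assumptions; only the memblock calls are the documented API):

#include <linux/memblock.h>
#include <linux/sizes.h>
#include <asm/sections.h>

static void __init example_early_mem_setup(void)
{
	void *table;

	/* describe the physical memory: one 1 GiB bank at 2 GiB (made up) */
	memblock_add(SZ_2G, SZ_1G);

	/* keep the kernel image out of early allocations */
	memblock_reserve(__pa_symbol(_text), _end - _text);

	/* only allow the region arrays to grow once reservations are known */
	memblock_allow_resize();

	/* zeroed, virtually addressed early allocation */
	table = memblock_alloc(SZ_64K, SMP_CACHE_BYTES);
	if (!table)
		panic("%s: failed to allocate early table\n", __func__);
}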
95 #ifndef CONFIG_NEED_MULTIPLE_NODES
96 struct pglist_data __refdata contig_page_data;
97 EXPORT_SYMBOL(contig_page_data);
98 #endif
100 unsigned long max_low_pfn;
101 unsigned long min_low_pfn;
102 unsigned long max_pfn;
103 unsigned long long max_possible_pfn;
105 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
106 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
107 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
108 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
109 #endif
111 struct memblock memblock __initdata_memblock = {
112 .memory.regions = memblock_memory_init_regions,
113 .memory.cnt = 1, /* empty dummy entry */
114 .memory.max = INIT_MEMBLOCK_REGIONS,
115 .memory.name = "memory",
117 .reserved.regions = memblock_reserved_init_regions,
118 .reserved.cnt = 1, /* empty dummy entry */
119 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
120 .reserved.name = "reserved",
122 .bottom_up = false,
123 .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
126 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
127 struct memblock_type physmem = {
128 .regions = memblock_physmem_init_regions,
129 .cnt = 1, /* empty dummy entry */
130 .max = INIT_PHYSMEM_REGIONS,
131 .name = "physmem",
133 #endif
135 int memblock_debug __initdata_memblock;
136 static bool system_has_some_mirror __initdata_memblock = false;
137 static int memblock_can_resize __initdata_memblock;
138 static int memblock_memory_in_slab __initdata_memblock = 0;
139 static int memblock_reserved_in_slab __initdata_memblock = 0;
141 static enum memblock_flags __init_memblock choose_memblock_flags(void)
143 return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
146 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
147 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
149 return *size = min(*size, PHYS_ADDR_MAX - base);
153 * Address comparison utilities
155 static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
156 phys_addr_t base2, phys_addr_t size2)
158 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
161 bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
162 phys_addr_t base, phys_addr_t size)
164 unsigned long i;
166 for (i = 0; i < type->cnt; i++)
167 if (memblock_addrs_overlap(base, size, type->regions[i].base,
168 type->regions[i].size))
169 break;
170 return i < type->cnt;
174 * __memblock_find_range_bottom_up - find free area utility in bottom-up
175 * @start: start of candidate range
176 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
177 * %MEMBLOCK_ALLOC_ACCESSIBLE
178 * @size: size of free area to find
179 * @align: alignment of free area to find
180 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
181 * @flags: pick from blocks based on memory attributes
183 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
185 * Return:
186 * Found address on success, 0 on failure.
188 static phys_addr_t __init_memblock
189 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
190 phys_addr_t size, phys_addr_t align, int nid,
191 enum memblock_flags flags)
193 phys_addr_t this_start, this_end, cand;
194 u64 i;
196 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
197 this_start = clamp(this_start, start, end);
198 this_end = clamp(this_end, start, end);
200 cand = round_up(this_start, align);
201 if (cand < this_end && this_end - cand >= size)
202 return cand;
205 return 0;
209 * __memblock_find_range_top_down - find free area utility, in top-down
210 * @start: start of candidate range
211 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
212 * %MEMBLOCK_ALLOC_ACCESSIBLE
213 * @size: size of free area to find
214 * @align: alignment of free area to find
215 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
216 * @flags: pick from blocks based on memory attributes
218 * Utility called from memblock_find_in_range_node(), find free area top-down.
220 * Return:
221 * Found address on success, 0 on failure.
223 static phys_addr_t __init_memblock
224 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
225 phys_addr_t size, phys_addr_t align, int nid,
226 enum memblock_flags flags)
228 phys_addr_t this_start, this_end, cand;
229 u64 i;
231 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
232 NULL) {
233 this_start = clamp(this_start, start, end);
234 this_end = clamp(this_end, start, end);
236 if (this_end < size)
237 continue;
239 cand = round_down(this_end - size, align);
240 if (cand >= this_start)
241 return cand;
244 return 0;
248 * memblock_find_in_range_node - find free area in given range and node
249 * @size: size of free area to find
250 * @align: alignment of free area to find
251 * @start: start of candidate range
252 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
253 * %MEMBLOCK_ALLOC_ACCESSIBLE
254 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
255 * @flags: pick from blocks based on memory attributes
257 * Find @size free area aligned to @align in the specified range and node.
259 * When allocation direction is bottom-up, the @start should be greater
260 * than the end of the kernel image. Otherwise, it will be trimmed. The
261 * reason is that we want the bottom-up allocation just near the kernel
262 * image so it is highly likely that the allocated memory and the kernel
263 * will reside in the same node.
265 * If bottom-up allocation failed, will try to allocate memory top-down.
267 * Return:
268 * Found address on success, 0 on failure.
270 static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
271 phys_addr_t align, phys_addr_t start,
272 phys_addr_t end, int nid,
273 enum memblock_flags flags)
275 phys_addr_t kernel_end, ret;
277 /* pump up @end */
278 if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
279 end == MEMBLOCK_ALLOC_KASAN)
280 end = memblock.current_limit;
282 /* avoid allocating the first page */
283 start = max_t(phys_addr_t, start, PAGE_SIZE);
284 end = max(start, end);
285 kernel_end = __pa_symbol(_end);
288 * try bottom-up allocation only when bottom-up mode
289 * is set and @end is above the kernel image.
291 if (memblock_bottom_up() && end > kernel_end) {
292 phys_addr_t bottom_up_start;
294 /* make sure we will allocate above the kernel */
295 bottom_up_start = max(start, kernel_end);
297 /* ok, try bottom-up allocation first */
298 ret = __memblock_find_range_bottom_up(bottom_up_start, end,
299 size, align, nid, flags);
300 if (ret)
301 return ret;
304 * we always limit bottom-up allocation above the kernel,
305 * but top-down allocation doesn't have the limit, so
306 * retrying top-down allocation may succeed when bottom-up
307 * allocation failed.
309 * bottom-up allocation is expected to fail very rarely,
310 * so we use WARN_ONCE() here to see the stack trace if
311 * a failure happens.
313 WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
314 "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
317 return __memblock_find_range_top_down(start, end, size, align, nid,
318 flags);
322 * memblock_find_in_range - find free area in given range
323 * @start: start of candidate range
324 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
325 * %MEMBLOCK_ALLOC_ACCESSIBLE
326 * @size: size of free area to find
327 * @align: alignment of free area to find
329 * Find @size free area aligned to @align in the specified range.
331 * Return:
332 * Found address on success, 0 on failure.
334 phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
335 phys_addr_t end, phys_addr_t size,
336 phys_addr_t align)
338 phys_addr_t ret;
339 enum memblock_flags flags = choose_memblock_flags();
341 again:
342 ret = memblock_find_in_range_node(size, align, start, end,
343 NUMA_NO_NODE, flags);
345 if (!ret && (flags & MEMBLOCK_MIRROR)) {
346 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
347 &size);
348 flags &= ~MEMBLOCK_MIRROR;
349 goto again;
352 return ret;
355 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
357 type->total_size -= type->regions[r].size;
358 memmove(&type->regions[r], &type->regions[r + 1],
359 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
360 type->cnt--;
362 /* Special case for empty arrays */
363 if (type->cnt == 0) {
364 WARN_ON(type->total_size != 0);
365 type->cnt = 1;
366 type->regions[0].base = 0;
367 type->regions[0].size = 0;
368 type->regions[0].flags = 0;
369 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
373 #ifndef CONFIG_ARCH_KEEP_MEMBLOCK
375 * memblock_discard - discard memory and reserved arrays if they were allocated
377 void __init memblock_discard(void)
379 phys_addr_t addr, size;
381 if (memblock.reserved.regions != memblock_reserved_init_regions) {
382 addr = __pa(memblock.reserved.regions);
383 size = PAGE_ALIGN(sizeof(struct memblock_region) *
384 memblock.reserved.max);
385 __memblock_free_late(addr, size);
388 if (memblock.memory.regions != memblock_memory_init_regions) {
389 addr = __pa(memblock.memory.regions);
390 size = PAGE_ALIGN(sizeof(struct memblock_region) *
391 memblock.memory.max);
392 __memblock_free_late(addr, size);
395 #endif
398 * memblock_double_array - double the size of the memblock regions array
399 * @type: memblock type of the regions array being doubled
400 * @new_area_start: starting address of memory range to avoid overlap with
401 * @new_area_size: size of memory range to avoid overlap with
403 * Double the size of the @type regions array. If memblock is being used to
404 * allocate memory for a new reserved regions array and there is a previously
405 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
406 * waiting to be reserved, ensure the memory used by the new array does
407 * not overlap.
409 * Return:
410 * 0 on success, -1 on failure.
412 static int __init_memblock memblock_double_array(struct memblock_type *type,
413 phys_addr_t new_area_start,
414 phys_addr_t new_area_size)
416 struct memblock_region *new_array, *old_array;
417 phys_addr_t old_alloc_size, new_alloc_size;
418 phys_addr_t old_size, new_size, addr, new_end;
419 int use_slab = slab_is_available();
420 int *in_slab;
422 /* We don't allow resizing until we know about the reserved regions
423 * of memory that aren't suitable for allocation
425 if (!memblock_can_resize)
426 return -1;
428 /* Calculate new doubled size */
429 old_size = type->max * sizeof(struct memblock_region);
430 new_size = old_size << 1;
432 * We need to allocate the new array aligned to PAGE_SIZE,
433 * so we can free it completely later.
435 old_alloc_size = PAGE_ALIGN(old_size);
436 new_alloc_size = PAGE_ALIGN(new_size);
438 /* Retrieve the slab flag */
439 if (type == &memblock.memory)
440 in_slab = &memblock_memory_in_slab;
441 else
442 in_slab = &memblock_reserved_in_slab;
444 /* Try to find some space for it */
445 if (use_slab) {
446 new_array = kmalloc(new_size, GFP_KERNEL);
447 addr = new_array ? __pa(new_array) : 0;
448 } else {
449 /* only exclude range when trying to double reserved.regions */
450 if (type != &memblock.reserved)
451 new_area_start = new_area_size = 0;
453 addr = memblock_find_in_range(new_area_start + new_area_size,
454 memblock.current_limit,
455 new_alloc_size, PAGE_SIZE);
456 if (!addr && new_area_size)
457 addr = memblock_find_in_range(0,
458 min(new_area_start, memblock.current_limit),
459 new_alloc_size, PAGE_SIZE);
461 new_array = addr ? __va(addr) : NULL;
463 if (!addr) {
464 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
465 type->name, type->max, type->max * 2);
466 return -1;
469 new_end = addr + new_size - 1;
470 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
471 type->name, type->max * 2, &addr, &new_end);
474 * Found space, we now need to move the array over before we add the
475 * reserved region since it may be our reserved array itself that is
476 * full.
478 memcpy(new_array, type->regions, old_size);
479 memset(new_array + type->max, 0, old_size);
480 old_array = type->regions;
481 type->regions = new_array;
482 type->max <<= 1;
484 /* Free old array. We needn't free it if the array is the static one */
485 if (*in_slab)
486 kfree(old_array);
487 else if (old_array != memblock_memory_init_regions &&
488 old_array != memblock_reserved_init_regions)
489 memblock_free(__pa(old_array), old_alloc_size);
492 * Reserve the new array if it came from memblock. Otherwise, we
493 * needn't do it
495 if (!use_slab)
496 BUG_ON(memblock_reserve(addr, new_alloc_size));
498 /* Update slab flag */
499 *in_slab = use_slab;
501 return 0;
505 * memblock_merge_regions - merge neighboring compatible regions
506 * @type: memblock type to scan
508 * Scan @type and merge neighboring compatible regions.
510 static void __init_memblock memblock_merge_regions(struct memblock_type *type)
512 int i = 0;
514 /* cnt never goes below 1 */
515 while (i < type->cnt - 1) {
516 struct memblock_region *this = &type->regions[i];
517 struct memblock_region *next = &type->regions[i + 1];
519 if (this->base + this->size != next->base ||
520 memblock_get_region_node(this) !=
521 memblock_get_region_node(next) ||
522 this->flags != next->flags) {
523 BUG_ON(this->base + this->size > next->base);
524 i++;
525 continue;
528 this->size += next->size;
529 /* move forward from next + 1, index of which is i + 2 */
530 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
531 type->cnt--;
536 * memblock_insert_region - insert new memblock region
537 * @type: memblock type to insert into
538 * @idx: index for the insertion point
539 * @base: base address of the new region
540 * @size: size of the new region
541 * @nid: node id of the new region
542 * @flags: flags of the new region
544 * Insert new memblock region [@base, @base + @size) into @type at @idx.
545 * @type must already have extra room to accommodate the new region.
547 static void __init_memblock memblock_insert_region(struct memblock_type *type,
548 int idx, phys_addr_t base,
549 phys_addr_t size,
550 int nid,
551 enum memblock_flags flags)
553 struct memblock_region *rgn = &type->regions[idx];
555 BUG_ON(type->cnt >= type->max);
556 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
557 rgn->base = base;
558 rgn->size = size;
559 rgn->flags = flags;
560 memblock_set_region_node(rgn, nid);
561 type->cnt++;
562 type->total_size += size;
566 * memblock_add_range - add new memblock region
567 * @type: memblock type to add new region into
568 * @base: base address of the new region
569 * @size: size of the new region
570 * @nid: nid of the new region
571 * @flags: flags of the new region
573 * Add new memblock region [@base, @base + @size) into @type. The new region
574 * is allowed to overlap with existing ones - overlaps don't affect already
575 * existing regions. @type is guaranteed to be minimal (all neighbouring
576 * compatible regions are merged) after the addition.
578 * Return:
579 * 0 on success, -errno on failure.
581 static int __init_memblock memblock_add_range(struct memblock_type *type,
582 phys_addr_t base, phys_addr_t size,
583 int nid, enum memblock_flags flags)
585 bool insert = false;
586 phys_addr_t obase = base;
587 phys_addr_t end = base + memblock_cap_size(base, &size);
588 int idx, nr_new;
589 struct memblock_region *rgn;
591 if (!size)
592 return 0;
594 /* special case for empty array */
595 if (type->regions[0].size == 0) {
596 WARN_ON(type->cnt != 1 || type->total_size);
597 type->regions[0].base = base;
598 type->regions[0].size = size;
599 type->regions[0].flags = flags;
600 memblock_set_region_node(&type->regions[0], nid);
601 type->total_size = size;
602 return 0;
604 repeat:
606 * The following is executed twice. Once with %false @insert and
607 * then with %true. The first counts the number of regions needed
608 * to accommodate the new area. The second actually inserts them.
610 base = obase;
611 nr_new = 0;
613 for_each_memblock_type(idx, type, rgn) {
614 phys_addr_t rbase = rgn->base;
615 phys_addr_t rend = rbase + rgn->size;
617 if (rbase >= end)
618 break;
619 if (rend <= base)
620 continue;
622 * @rgn overlaps. If it separates the lower part of new
623 * area, insert that portion.
625 if (rbase > base) {
626 #ifdef CONFIG_NEED_MULTIPLE_NODES
627 WARN_ON(nid != memblock_get_region_node(rgn));
628 #endif
629 WARN_ON(flags != rgn->flags);
630 nr_new++;
631 if (insert)
632 memblock_insert_region(type, idx++, base,
633 rbase - base, nid,
634 flags);
636 /* area below @rend is dealt with, forget about it */
637 base = min(rend, end);
640 /* insert the remaining portion */
641 if (base < end) {
642 nr_new++;
643 if (insert)
644 memblock_insert_region(type, idx, base, end - base,
645 nid, flags);
648 if (!nr_new)
649 return 0;
652 * If this was the first round, resize array and repeat for actual
653 * insertions; otherwise, merge and return.
655 if (!insert) {
656 while (type->cnt + nr_new > type->max)
657 if (memblock_double_array(type, obase, size) < 0)
658 return -ENOMEM;
659 insert = true;
660 goto repeat;
661 } else {
662 memblock_merge_regions(type);
663 return 0;
668 * memblock_add_node - add new memblock region within a NUMA node
669 * @base: base address of the new region
670 * @size: size of the new region
671 * @nid: nid of the new region
673 * Add new memblock region [@base, @base + @size) to the "memory"
674 * type. See memblock_add_range() description for more details.
676 * Return:
677 * 0 on success, -errno on failure.
679 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
680 int nid)
682 return memblock_add_range(&memblock.memory, base, size, nid, 0);
686 * memblock_add - add new memblock region
687 * @base: base address of the new region
688 * @size: size of the new region
690 * Add new memblock region [@base, @base + @size) to the "memory"
691 * type. See memblock_add_range() description for more details.
693 * Return:
694 * 0 on success, -errno on failure.
696 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
698 phys_addr_t end = base + size - 1;
700 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
701 &base, &end, (void *)_RET_IP_);
703 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
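As the overview notes, a NUMA platform can either pass the node id when a range is registered or attach it later; a short sketch of both styles (the bases, sizes and node ids are made-up placeholders):

static void __init example_register_numa_memory(void)
{
	phys_addr_t base0 = SZ_2G, size0 = SZ_1G;	/* node 0 (assumed) */
	phys_addr_t base1 = SZ_4G, size1 = SZ_1G;	/* node 1 (assumed) */

	/* either: register each range with its node id directly ... */
	memblock_add_node(base0, size0, 0);
	memblock_add_node(base1, size1, 1);

	/* ... or: register first, then assign nodes once the firmware
	 * tables (SRAT, device tree, ...) have been parsed */
	memblock_add(base0, size0);
	memblock_add(base1, size1);
	memblock_set_node(base0, size0, &memblock.memory, 0);
	memblock_set_node(base1, size1, &memblock.memory, 1);
}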
707 * memblock_isolate_range - isolate given range into disjoint memblocks
708 * @type: memblock type to isolate range for
709 * @base: base of range to isolate
710 * @size: size of range to isolate
711 * @start_rgn: out parameter for the start of isolated region
712 * @end_rgn: out parameter for the end of isolated region
714 * Walk @type and ensure that regions don't cross the boundaries defined by
715 * [@base, @base + @size). Crossing regions are split at the boundaries,
716 * which may create at most two more regions. The index of the first
717 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
719 * Return:
720 * 0 on success, -errno on failure.
722 static int __init_memblock memblock_isolate_range(struct memblock_type *type,
723 phys_addr_t base, phys_addr_t size,
724 int *start_rgn, int *end_rgn)
726 phys_addr_t end = base + memblock_cap_size(base, &size);
727 int idx;
728 struct memblock_region *rgn;
730 *start_rgn = *end_rgn = 0;
732 if (!size)
733 return 0;
735 /* we'll create at most two more regions */
736 while (type->cnt + 2 > type->max)
737 if (memblock_double_array(type, base, size) < 0)
738 return -ENOMEM;
740 for_each_memblock_type(idx, type, rgn) {
741 phys_addr_t rbase = rgn->base;
742 phys_addr_t rend = rbase + rgn->size;
744 if (rbase >= end)
745 break;
746 if (rend <= base)
747 continue;
749 if (rbase < base) {
751 * @rgn intersects from below. Split and continue
752 * to process the next region - the new top half.
754 rgn->base = base;
755 rgn->size -= base - rbase;
756 type->total_size -= base - rbase;
757 memblock_insert_region(type, idx, rbase, base - rbase,
758 memblock_get_region_node(rgn),
759 rgn->flags);
760 } else if (rend > end) {
762 * @rgn intersects from above. Split and redo the
763 * current region - the new bottom half.
765 rgn->base = end;
766 rgn->size -= end - rbase;
767 type->total_size -= end - rbase;
768 memblock_insert_region(type, idx--, rbase, end - rbase,
769 memblock_get_region_node(rgn),
770 rgn->flags);
771 } else {
772 /* @rgn is fully contained, record it */
773 if (!*end_rgn)
774 *start_rgn = idx;
775 *end_rgn = idx + 1;
779 return 0;
782 static int __init_memblock memblock_remove_range(struct memblock_type *type,
783 phys_addr_t base, phys_addr_t size)
785 int start_rgn, end_rgn;
786 int i, ret;
788 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
789 if (ret)
790 return ret;
792 for (i = end_rgn - 1; i >= start_rgn; i--)
793 memblock_remove_region(type, i);
794 return 0;
797 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
799 phys_addr_t end = base + size - 1;
801 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
802 &base, &end, (void *)_RET_IP_);
804 return memblock_remove_range(&memblock.memory, base, size);
808 * memblock_free - free boot memory block
809 * @base: phys starting address of the boot memory block
810 * @size: size of the boot memory block in bytes
812 * Free boot memory block previously allocated by memblock_alloc_xx() API.
813 * The freed memory will not be released to the buddy allocator.
815 int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
817 phys_addr_t end = base + size - 1;
819 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
820 &base, &end, (void *)_RET_IP_);
822 kmemleak_free_part_phys(base, size);
823 return memblock_remove_range(&memblock.reserved, base, size);
826 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
828 phys_addr_t end = base + size - 1;
830 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
831 &base, &end, (void *)_RET_IP_);
833 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
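A common pairing, sketched with made-up values: a firmware-provided region is reserved early and handed back with memblock_free() once its contents have been consumed:

static void __init example_consume_fw_table(void)
{
	phys_addr_t fw_base = 0x80000000;	/* assumed firmware address */
	phys_addr_t fw_size = SZ_16K;

	memblock_reserve(fw_base, fw_size);	/* keep allocations away from it */

	/* ... parse or copy the firmware data here ... */

	/* note: in this kernel version memblock_free() takes a physical address */
	memblock_free(fw_base, fw_size);
}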
836 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
837 int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
839 phys_addr_t end = base + size - 1;
841 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
842 &base, &end, (void *)_RET_IP_);
844 return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
846 #endif
849 * memblock_setclr_flag - set or clear flag for a memory region
850 * @base: base address of the region
851 * @size: size of the region
852 * @set: set or clear the flag
853 * @flag: the flag to udpate
855 * This function isolates region [@base, @base + @size), and sets/clears flag
857 * Return: 0 on success, -errno on failure.
859 static int __init_memblock memblock_setclr_flag(phys_addr_t base,
860 phys_addr_t size, int set, int flag)
862 struct memblock_type *type = &memblock.memory;
863 int i, ret, start_rgn, end_rgn;
865 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
866 if (ret)
867 return ret;
869 for (i = start_rgn; i < end_rgn; i++) {
870 struct memblock_region *r = &type->regions[i];
872 if (set)
873 r->flags |= flag;
874 else
875 r->flags &= ~flag;
878 memblock_merge_regions(type);
879 return 0;
883 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
884 * @base: the base phys addr of the region
885 * @size: the size of the region
887 * Return: 0 on success, -errno on failure.
889 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
891 return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
895 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
896 * @base: the base phys addr of the region
897 * @size: the size of the region
899 * Return: 0 on success, -errno on failure.
901 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
903 return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
907 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
908 * @base: the base phys addr of the region
909 * @size: the size of the region
911 * Return: 0 on success, -errno on failure.
913 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
915 system_has_some_mirror = true;
917 return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
921 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
922 * @base: the base phys addr of the region
923 * @size: the size of the region
925 * Return: 0 on success, -errno on failure.
927 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
929 return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
933 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
934 * @base: the base phys addr of the region
935 * @size: the size of the region
937 * Return: 0 on success, -errno on failure.
939 int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
941 return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
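For instance (a sketch along the lines of what EFI-based architectures do; the range itself is an assumption), memory that firmware owns but that must never end up in the linear map is still added to "memory" and then marked:

static void __init example_mark_fw_region(phys_addr_t fw_base, phys_addr_t fw_size)
{
	memblock_add(fw_base, fw_size);		/* keep the range visible to memblock */
	memblock_mark_nomap(fw_base, fw_size);	/* but never map or allocate from it */
}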
945 * __next_reserved_mem_region - next function for for_each_reserved_region()
946 * @idx: pointer to u64 loop variable
947 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
948 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
950 * Iterate over all reserved memory regions.
952 void __init_memblock __next_reserved_mem_region(u64 *idx,
953 phys_addr_t *out_start,
954 phys_addr_t *out_end)
956 struct memblock_type *type = &memblock.reserved;
958 if (*idx < type->cnt) {
959 struct memblock_region *r = &type->regions[*idx];
960 phys_addr_t base = r->base;
961 phys_addr_t size = r->size;
963 if (out_start)
964 *out_start = base;
965 if (out_end)
966 *out_end = base + size - 1;
968 *idx += 1;
969 return;
972 /* signal end of iteration */
973 *idx = ULLONG_MAX;
976 static bool should_skip_region(struct memblock_region *m, int nid, int flags)
978 int m_nid = memblock_get_region_node(m);
980 /* only memory regions are associated with nodes, check it */
981 if (nid != NUMA_NO_NODE && nid != m_nid)
982 return true;
984 /* skip hotpluggable memory regions if needed */
985 if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
986 return true;
988 /* if we want mirror memory skip non-mirror memory regions */
989 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
990 return true;
992 /* skip nomap memory unless we were asked for it explicitly */
993 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
994 return true;
996 return false;
1000 * __next_mem_range - next function for for_each_free_mem_range() etc.
1001 * @idx: pointer to u64 loop variable
1002 * @nid: node selector, %NUMA_NO_NODE for all nodes
1003 * @flags: pick from blocks based on memory attributes
1004 * @type_a: pointer to memblock_type from where the range is taken
1005 * @type_b: pointer to memblock_type which excludes memory from being taken
1006 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1007 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1008 * @out_nid: ptr to int for nid of the range, can be %NULL
1010 * Find the first area from *@idx which matches @nid, fill the out
1011 * parameters, and update *@idx for the next iteration. The lower 32bit of
1012 * *@idx contains index into type_a and the upper 32bit indexes the
1013 * areas before each region in type_b. For example, if type_b regions
1014 * look like the following,
1016 * 0:[0-16), 1:[32-48), 2:[128-130)
1018 * The upper 32bit indexes the following regions.
1020 * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
1022 * As both region arrays are sorted, the function advances the two indices
1023 * in lockstep and returns each intersection.
1025 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
1026 struct memblock_type *type_a,
1027 struct memblock_type *type_b, phys_addr_t *out_start,
1028 phys_addr_t *out_end, int *out_nid)
1030 int idx_a = *idx & 0xffffffff;
1031 int idx_b = *idx >> 32;
1033 if (WARN_ONCE(nid == MAX_NUMNODES,
1034 "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1035 nid = NUMA_NO_NODE;
1037 for (; idx_a < type_a->cnt; idx_a++) {
1038 struct memblock_region *m = &type_a->regions[idx_a];
1040 phys_addr_t m_start = m->base;
1041 phys_addr_t m_end = m->base + m->size;
1042 int m_nid = memblock_get_region_node(m);
1044 if (should_skip_region(m, nid, flags))
1045 continue;
1047 if (!type_b) {
1048 if (out_start)
1049 *out_start = m_start;
1050 if (out_end)
1051 *out_end = m_end;
1052 if (out_nid)
1053 *out_nid = m_nid;
1054 idx_a++;
1055 *idx = (u32)idx_a | (u64)idx_b << 32;
1056 return;
1059 /* scan areas before each reservation */
1060 for (; idx_b < type_b->cnt + 1; idx_b++) {
1061 struct memblock_region *r;
1062 phys_addr_t r_start;
1063 phys_addr_t r_end;
1065 r = &type_b->regions[idx_b];
1066 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1067 r_end = idx_b < type_b->cnt ?
1068 r->base : PHYS_ADDR_MAX;
1071 * if idx_b advanced past idx_a,
1072 * break out to advance idx_a
1074 if (r_start >= m_end)
1075 break;
1076 /* if the two regions intersect, we're done */
1077 if (m_start < r_end) {
1078 if (out_start)
1079 *out_start =
1080 max(m_start, r_start);
1081 if (out_end)
1082 *out_end = min(m_end, r_end);
1083 if (out_nid)
1084 *out_nid = m_nid;
1086 * The region which ends first is
1087 * advanced for the next iteration.
1089 if (m_end <= r_end)
1090 idx_a++;
1091 else
1092 idx_b++;
1093 *idx = (u32)idx_a | (u64)idx_b << 32;
1094 return;
1099 /* signal end of iteration */
1100 *idx = ULLONG_MAX;
1104 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
1106 * @idx: pointer to u64 loop variable
1107 * @nid: node selector, %NUMA_NO_NODE for all nodes
1108 * @flags: pick from blocks based on memory attributes
1109 * @type_a: pointer to memblock_type from where the range is taken
1110 * @type_b: pointer to memblock_type which excludes memory from being taken
1111 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1112 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1113 * @out_nid: ptr to int for nid of the range, can be %NULL
1115 * Finds the next range from type_a which is not marked as unsuitable
1116 * in type_b.
1118 * Reverse of __next_mem_range().
1120 void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1121 enum memblock_flags flags,
1122 struct memblock_type *type_a,
1123 struct memblock_type *type_b,
1124 phys_addr_t *out_start,
1125 phys_addr_t *out_end, int *out_nid)
1127 int idx_a = *idx & 0xffffffff;
1128 int idx_b = *idx >> 32;
1130 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1131 nid = NUMA_NO_NODE;
1133 if (*idx == (u64)ULLONG_MAX) {
1134 idx_a = type_a->cnt - 1;
1135 if (type_b != NULL)
1136 idx_b = type_b->cnt;
1137 else
1138 idx_b = 0;
1141 for (; idx_a >= 0; idx_a--) {
1142 struct memblock_region *m = &type_a->regions[idx_a];
1144 phys_addr_t m_start = m->base;
1145 phys_addr_t m_end = m->base + m->size;
1146 int m_nid = memblock_get_region_node(m);
1148 if (should_skip_region(m, nid, flags))
1149 continue;
1151 if (!type_b) {
1152 if (out_start)
1153 *out_start = m_start;
1154 if (out_end)
1155 *out_end = m_end;
1156 if (out_nid)
1157 *out_nid = m_nid;
1158 idx_a--;
1159 *idx = (u32)idx_a | (u64)idx_b << 32;
1160 return;
1163 /* scan areas before each reservation */
1164 for (; idx_b >= 0; idx_b--) {
1165 struct memblock_region *r;
1166 phys_addr_t r_start;
1167 phys_addr_t r_end;
1169 r = &type_b->regions[idx_b];
1170 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1171 r_end = idx_b < type_b->cnt ?
1172 r->base : PHYS_ADDR_MAX;
1174 * if idx_b advanced past idx_a,
1175 * break out to advance idx_a
1178 if (r_end <= m_start)
1179 break;
1180 /* if the two regions intersect, we're done */
1181 if (m_end > r_start) {
1182 if (out_start)
1183 *out_start = max(m_start, r_start);
1184 if (out_end)
1185 *out_end = min(m_end, r_end);
1186 if (out_nid)
1187 *out_nid = m_nid;
1188 if (m_start >= r_start)
1189 idx_a--;
1190 else
1191 idx_b--;
1192 *idx = (u32)idx_a | (u64)idx_b << 32;
1193 return;
1197 /* signal end of iteration */
1198 *idx = ULLONG_MAX;
1202 * Common iterator interface used to define for_each_mem_pfn_range().
1204 void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1205 unsigned long *out_start_pfn,
1206 unsigned long *out_end_pfn, int *out_nid)
1208 struct memblock_type *type = &memblock.memory;
1209 struct memblock_region *r;
1210 int r_nid;
1212 while (++*idx < type->cnt) {
1213 r = &type->regions[*idx];
1214 r_nid = memblock_get_region_node(r);
1216 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1217 continue;
1218 if (nid == MAX_NUMNODES || nid == r_nid)
1219 break;
1221 if (*idx >= type->cnt) {
1222 *idx = -1;
1223 return;
1226 if (out_start_pfn)
1227 *out_start_pfn = PFN_UP(r->base);
1228 if (out_end_pfn)
1229 *out_end_pfn = PFN_DOWN(r->base + r->size);
1230 if (out_nid)
1231 *out_nid = r_nid;
1235 * memblock_set_node - set node ID on memblock regions
1236 * @base: base of area to set node ID for
1237 * @size: size of area to set node ID for
1238 * @type: memblock type to set node ID for
1239 * @nid: node ID to set
1241 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
1242 * Regions which cross the area boundaries are split as necessary.
1244 * Return:
1245 * 0 on success, -errno on failure.
1247 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1248 struct memblock_type *type, int nid)
1250 #ifdef CONFIG_NEED_MULTIPLE_NODES
1251 int start_rgn, end_rgn;
1252 int i, ret;
1254 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1255 if (ret)
1256 return ret;
1258 for (i = start_rgn; i < end_rgn; i++)
1259 memblock_set_region_node(&type->regions[i], nid);
1261 memblock_merge_regions(type);
1262 #endif
1263 return 0;
1266 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1268 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
1270 * @idx: pointer to u64 loop variable
1271 * @zone: zone in which all of the memory blocks reside
1272 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
1273 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
1275 * This function is meant to be a zone/pfn specific wrapper for the
1276 * for_each_mem_range type iterators. Specifically they are used in the
1277 * deferred memory init routines and as such we were duplicating much of
1278 * this logic throughout the code. So instead of having it in multiple
1279 * locations it seemed like it would make more sense to centralize this to
1280 * one new iterator that does everything they need.
1282 void __init_memblock
1283 __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
1284 unsigned long *out_spfn, unsigned long *out_epfn)
1286 int zone_nid = zone_to_nid(zone);
1287 phys_addr_t spa, epa;
1288 int nid;
1290 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1291 &memblock.memory, &memblock.reserved,
1292 &spa, &epa, &nid);
1294 while (*idx != U64_MAX) {
1295 unsigned long epfn = PFN_DOWN(epa);
1296 unsigned long spfn = PFN_UP(spa);
1299 * Verify the end is at least past the start of the zone and
1300 * that we have at least one PFN to initialize.
1302 if (zone->zone_start_pfn < epfn && spfn < epfn) {
1303 /* if we went too far just stop searching */
1304 if (zone_end_pfn(zone) <= spfn) {
1305 *idx = U64_MAX;
1306 break;
1309 if (out_spfn)
1310 *out_spfn = max(zone->zone_start_pfn, spfn);
1311 if (out_epfn)
1312 *out_epfn = min(zone_end_pfn(zone), epfn);
1314 return;
1317 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1318 &memblock.memory, &memblock.reserved,
1319 &spa, &epa, &nid);
1322 /* signal end of iteration */
1323 if (out_spfn)
1324 *out_spfn = ULONG_MAX;
1325 if (out_epfn)
1326 *out_epfn = 0;
1329 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1332 * memblock_alloc_range_nid - allocate boot memory block
1333 * @size: size of memory block to be allocated in bytes
1334 * @align: alignment of the region and block's size
1335 * @start: the lower bound of the memory region to allocate (phys address)
1336 * @end: the upper bound of the memory region to allocate (phys address)
1337 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1338 * @exact_nid: control the allocation fall back to other nodes
1340 * The allocation is performed from memory region limited by
1341 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
1343 * If the specified node can not hold the requested memory and @exact_nid
1344 * is false, the allocation falls back to any node in the system.
1346 * For systems with memory mirroring, the allocation is attempted first
1347 * from the regions with mirroring enabled and then retried from any
1348 * memory region.
1350 * In addition, the function sets the min_count to 0 using kmemleak_alloc_phys() for the
1351 * allocated boot memory block, so that it is never reported as a leak.
1353 * Return:
1354 * Physical address of allocated memory block on success, %0 on failure.
1356 phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1357 phys_addr_t align, phys_addr_t start,
1358 phys_addr_t end, int nid,
1359 bool exact_nid)
1361 enum memblock_flags flags = choose_memblock_flags();
1362 phys_addr_t found;
1364 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1365 nid = NUMA_NO_NODE;
1367 if (!align) {
1368 /* Can't use WARNs this early in boot on powerpc */
1369 dump_stack();
1370 align = SMP_CACHE_BYTES;
1373 again:
1374 found = memblock_find_in_range_node(size, align, start, end, nid,
1375 flags);
1376 if (found && !memblock_reserve(found, size))
1377 goto done;
1379 if (nid != NUMA_NO_NODE && !exact_nid) {
1380 found = memblock_find_in_range_node(size, align, start,
1381 end, NUMA_NO_NODE,
1382 flags);
1383 if (found && !memblock_reserve(found, size))
1384 goto done;
1387 if (flags & MEMBLOCK_MIRROR) {
1388 flags &= ~MEMBLOCK_MIRROR;
1389 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1390 &size);
1391 goto again;
1394 return 0;
1396 done:
1397 /* Skip kmemleak for kasan_init() due to high volume. */
1398 if (end != MEMBLOCK_ALLOC_KASAN)
1400 * The min_count is set to 0 so that memblock allocated
1401 * blocks are never reported as leaks. This is because many
1402 * of these blocks are only referred via the physical
1403 * address which is not looked up by kmemleak.
1405 kmemleak_alloc_phys(found, size, 0, 0);
1407 return found;
1411 * memblock_phys_alloc_range - allocate a memory block inside specified range
1412 * @size: size of memory block to be allocated in bytes
1413 * @align: alignment of the region and block's size
1414 * @start: the lower bound of the memory region to allocate (physical address)
1415 * @end: the upper bound of the memory region to allocate (physical address)
1417 * Allocate @size bytes in the between @start and @end.
1419 * Return: physical address of the allocated memory block on success,
1420 * %0 on failure.
1422 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1423 phys_addr_t align,
1424 phys_addr_t start,
1425 phys_addr_t end)
1427 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1428 false);
1432 * memblock_phys_alloc_try_nid - allocate a memory block from the specified NUMA node
1433 * @size: size of memory block to be allocated in bytes
1434 * @align: alignment of the region and block's size
1435 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1437 * Allocates memory block from the specified NUMA node. If the node
1438 * has no available memory, attempts to allocate from any node in the
1439 * system.
1441 * Return: physical address of the allocated memory block on success,
1442 * %0 on failure.
1444 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1446 return memblock_alloc_range_nid(size, align, 0,
1447 MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
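A typical caller, sketched: allocating per-node bookkeeping close to the node it describes (node_data[] is an assumed arch-level array and the panic message is illustrative; the allocation call is the API documented above):

static void __init example_alloc_node_data(int nid)
{
	phys_addr_t pa;

	pa = memblock_phys_alloc_try_nid(sizeof(pg_data_t), SMP_CACHE_BYTES, nid);
	if (!pa)
		panic("%s: cannot allocate pglist_data for node %d\n", __func__, nid);

	node_data[nid] = __va(pa);	/* assumed per-arch array of pg_data_t * */
}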
1451 * memblock_alloc_internal - allocate boot memory block
1452 * @size: size of memory block to be allocated in bytes
1453 * @align: alignment of the region and block's size
1454 * @min_addr: the lower bound of the memory region to allocate (phys address)
1455 * @max_addr: the upper bound of the memory region to allocate (phys address)
1456 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1457 * @exact_nid: control the allocation fall back to other nodes
1459 * Allocates memory block using memblock_alloc_range_nid() and
1460 * converts the returned physical address to virtual.
1462 * The @min_addr limit is dropped if it can not be satisfied and the allocation
1463 * will fall back to memory below @min_addr. Other constraints, such
1464 * as node and mirrored memory will be handled again in
1465 * memblock_alloc_range_nid().
1467 * Return:
1468 * Virtual address of allocated memory block on success, NULL on failure.
1470 static void * __init memblock_alloc_internal(
1471 phys_addr_t size, phys_addr_t align,
1472 phys_addr_t min_addr, phys_addr_t max_addr,
1473 int nid, bool exact_nid)
1475 phys_addr_t alloc;
1478 * Detect any accidental use of these APIs after slab is ready, as at
1479 * this moment memblock may be deinitialized already and its
1480 * internal data may be destroyed (after execution of memblock_free_all)
1482 if (WARN_ON_ONCE(slab_is_available()))
1483 return kzalloc_node(size, GFP_NOWAIT, nid);
1485 if (max_addr > memblock.current_limit)
1486 max_addr = memblock.current_limit;
1488 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1489 exact_nid);
1491 /* retry allocation without lower limit */
1492 if (!alloc && min_addr)
1493 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1494 exact_nid);
1496 if (!alloc)
1497 return NULL;
1499 return phys_to_virt(alloc);
1503 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1504 * without zeroing memory
1505 * @size: size of memory block to be allocated in bytes
1506 * @align: alignment of the region and block's size
1507 * @min_addr: the lower bound of the memory region from where the allocation
1508 * is preferred (phys address)
1509 * @max_addr: the upper bound of the memory region from where the allocation
1510 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1511 * allocate only from memory limited by memblock.current_limit value
1512 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1514 * Public function, provides additional debug information (including caller
1515 * info), if enabled. Does not zero allocated memory.
1517 * Return:
1518 * Virtual address of allocated memory block on success, NULL on failure.
1520 void * __init memblock_alloc_exact_nid_raw(
1521 phys_addr_t size, phys_addr_t align,
1522 phys_addr_t min_addr, phys_addr_t max_addr,
1523 int nid)
1525 void *ptr;
1527 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1528 __func__, (u64)size, (u64)align, nid, &min_addr,
1529 &max_addr, (void *)_RET_IP_);
1531 ptr = memblock_alloc_internal(size, align,
1532 min_addr, max_addr, nid, true);
1533 if (ptr && size > 0)
1534 page_init_poison(ptr, size);
1536 return ptr;
1540 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1541 * memory and without panicking
1542 * @size: size of memory block to be allocated in bytes
1543 * @align: alignment of the region and block's size
1544 * @min_addr: the lower bound of the memory region from where the allocation
1545 * is preferred (phys address)
1546 * @max_addr: the upper bound of the memory region from where the allocation
1547 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1548 * allocate only from memory limited by memblock.current_limit value
1549 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1551 * Public function, provides additional debug information (including caller
1552 * info), if enabled. Does not zero allocated memory, does not panic if request
1553 * cannot be satisfied.
1555 * Return:
1556 * Virtual address of allocated memory block on success, NULL on failure.
1558 void * __init memblock_alloc_try_nid_raw(
1559 phys_addr_t size, phys_addr_t align,
1560 phys_addr_t min_addr, phys_addr_t max_addr,
1561 int nid)
1563 void *ptr;
1565 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1566 __func__, (u64)size, (u64)align, nid, &min_addr,
1567 &max_addr, (void *)_RET_IP_);
1569 ptr = memblock_alloc_internal(size, align,
1570 min_addr, max_addr, nid, false);
1571 if (ptr && size > 0)
1572 page_init_poison(ptr, size);
1574 return ptr;
1578 * memblock_alloc_try_nid - allocate boot memory block
1579 * @size: size of memory block to be allocated in bytes
1580 * @align: alignment of the region and block's size
1581 * @min_addr: the lower bound of the memory region from where the allocation
1582 * is preferred (phys address)
1583 * @max_addr: the upper bound of the memory region from where the allocation
1584 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1585 * allocate only from memory limited by memblock.current_limit value
1586 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1588 * Public function, provides additional debug information (including caller
1589 * info), if enabled. This function zeroes the allocated memory.
1591 * Return:
1592 * Virtual address of allocated memory block on success, NULL on failure.
1594 void * __init memblock_alloc_try_nid(
1595 phys_addr_t size, phys_addr_t align,
1596 phys_addr_t min_addr, phys_addr_t max_addr,
1597 int nid)
1599 void *ptr;
1601 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1602 __func__, (u64)size, (u64)align, nid, &min_addr,
1603 &max_addr, (void *)_RET_IP_);
1604 ptr = memblock_alloc_internal(size, align,
1605 min_addr, max_addr, nid, false);
1606 if (ptr)
1607 memset(ptr, 0, size);
1609 return ptr;
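To make the difference between the _raw and the zeroing variants concrete (a sketch; @size and @nid are placeholders supplied by the caller):

static void __init example_alloc_variants(phys_addr_t size, int nid)
{
	void *map, *tbl;

	/* caller will initialize every byte itself, so skip the zeroing */
	map = memblock_alloc_try_nid_raw(size, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	/* caller relies on the block starting out zeroed */
	tbl = memblock_alloc_try_nid(size, PAGE_SIZE, 0,
				     MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}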
1613 * __memblock_free_late - free pages directly to buddy allocator
1614 * @base: phys starting address of the boot memory block
1615 * @size: size of the boot memory block in bytes
1617 * This is only useful when the memblock allocator has already been torn
1618 * down, but we are still initializing the system. Pages are released directly
1619 * to the buddy allocator.
1621 void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1623 phys_addr_t cursor, end;
1625 end = base + size - 1;
1626 memblock_dbg("%s: [%pa-%pa] %pS\n",
1627 __func__, &base, &end, (void *)_RET_IP_);
1628 kmemleak_free_part_phys(base, size);
1629 cursor = PFN_UP(base);
1630 end = PFN_DOWN(base + size);
1632 for (; cursor < end; cursor++) {
1633 memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1634 totalram_pages_inc();
1639 * Remaining API functions
1642 phys_addr_t __init_memblock memblock_phys_mem_size(void)
1644 return memblock.memory.total_size;
1647 phys_addr_t __init_memblock memblock_reserved_size(void)
1649 return memblock.reserved.total_size;
1652 phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1654 unsigned long pages = 0;
1655 struct memblock_region *r;
1656 unsigned long start_pfn, end_pfn;
1658 for_each_memblock(memory, r) {
1659 start_pfn = memblock_region_memory_base_pfn(r);
1660 end_pfn = memblock_region_memory_end_pfn(r);
1661 start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1662 end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1663 pages += end_pfn - start_pfn;
1666 return PFN_PHYS(pages);
1669 /* lowest address */
1670 phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1672 return memblock.memory.regions[0].base;
1675 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1677 int idx = memblock.memory.cnt - 1;
1679 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1682 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1684 phys_addr_t max_addr = PHYS_ADDR_MAX;
1685 struct memblock_region *r;
1688 * translate the memory @limit size into the max address within one of
1689 * the memory memblock regions. If @limit exceeds the total size of
1690 * those regions, max_addr will keep its original value, PHYS_ADDR_MAX.
1692 for_each_memblock(memory, r) {
1693 if (limit <= r->size) {
1694 max_addr = r->base + limit;
1695 break;
1697 limit -= r->size;
1700 return max_addr;
1703 void __init memblock_enforce_memory_limit(phys_addr_t limit)
1705 phys_addr_t max_addr;
1707 if (!limit)
1708 return;
1710 max_addr = __find_max_addr(limit);
1712 /* @limit exceeds the total size of the memory, do nothing */
1713 if (max_addr == PHYS_ADDR_MAX)
1714 return;
1716 /* truncate both memory and reserved regions */
1717 memblock_remove_range(&memblock.memory, max_addr,
1718 PHYS_ADDR_MAX);
1719 memblock_remove_range(&memblock.reserved, max_addr,
1720 PHYS_ADDR_MAX);
1723 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1725 int start_rgn, end_rgn;
1726 int i, ret;
1728 if (!size)
1729 return;
1731 ret = memblock_isolate_range(&memblock.memory, base, size,
1732 &start_rgn, &end_rgn);
1733 if (ret)
1734 return;
1736 /* remove all the MAP regions */
1737 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1738 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1739 memblock_remove_region(&memblock.memory, i);
1741 for (i = start_rgn - 1; i >= 0; i--)
1742 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1743 memblock_remove_region(&memblock.memory, i);
1745 /* truncate the reserved regions */
1746 memblock_remove_range(&memblock.reserved, 0, base);
1747 memblock_remove_range(&memblock.reserved,
1748 base + size, PHYS_ADDR_MAX);
1751 void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1753 phys_addr_t max_addr;
1755 if (!limit)
1756 return;
1758 max_addr = __find_max_addr(limit);
1760 /* @limit exceeds the total size of the memory, do nothing */
1761 if (max_addr == PHYS_ADDR_MAX)
1762 return;
1764 memblock_cap_memory_range(0, max_addr);
1767 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1769 unsigned int left = 0, right = type->cnt;
1771 do {
1772 unsigned int mid = (right + left) / 2;
1774 if (addr < type->regions[mid].base)
1775 right = mid;
1776 else if (addr >= (type->regions[mid].base +
1777 type->regions[mid].size))
1778 left = mid + 1;
1779 else
1780 return mid;
1781 } while (left < right);
1782 return -1;
1785 bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1787 return memblock_search(&memblock.reserved, addr) != -1;
1790 bool __init_memblock memblock_is_memory(phys_addr_t addr)
1792 return memblock_search(&memblock.memory, addr) != -1;
1795 bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1797 int i = memblock_search(&memblock.memory, addr);
1799 if (i == -1)
1800 return false;
1801 return !memblock_is_nomap(&memblock.memory.regions[i]);
1804 int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1805 unsigned long *start_pfn, unsigned long *end_pfn)
1807 struct memblock_type *type = &memblock.memory;
1808 int mid = memblock_search(type, PFN_PHYS(pfn));
1810 if (mid == -1)
1811 return -1;
1813 *start_pfn = PFN_DOWN(type->regions[mid].base);
1814 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1816 return memblock_get_region_node(&type->regions[mid]);
1820 * memblock_is_region_memory - check if a region is a subset of memory
1821 * @base: base of region to check
1822 * @size: size of region to check
1824 * Check if the region [@base, @base + @size) is a subset of a memory block.
1826 * Return:
1827 * True if the region is a subset of a memory block, false otherwise.
1829 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1831 int idx = memblock_search(&memblock.memory, base);
1832 phys_addr_t end = base + memblock_cap_size(base, &size);
1834 if (idx == -1)
1835 return false;
1836 return (memblock.memory.regions[idx].base +
1837 memblock.memory.regions[idx].size) >= end;
1841 * memblock_is_region_reserved - check if a region intersects reserved memory
1842 * @base: base of region to check
1843 * @size: size of region to check
1845 * Check if the region [@base, @base + @size) intersects a reserved
1846 * memory block.
1848 * Return:
1849 * True if they intersect, false if not.
1851 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1853 memblock_cap_size(base, &size);
1854 return memblock_overlaps_region(&memblock.reserved, base, size);
1857 void __init_memblock memblock_trim_memory(phys_addr_t align)
1859 phys_addr_t start, end, orig_start, orig_end;
1860 struct memblock_region *r;
1862 for_each_memblock(memory, r) {
1863 orig_start = r->base;
1864 orig_end = r->base + r->size;
1865 start = round_up(orig_start, align);
1866 end = round_down(orig_end, align);
1868 if (start == orig_start && end == orig_end)
1869 continue;
1871 if (start < end) {
1872 r->base = start;
1873 r->size = end - start;
1874 } else {
1875 memblock_remove_region(&memblock.memory,
1876 r - memblock.memory.regions);
1877 r--;
1882 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1884 memblock.current_limit = limit;
1887 phys_addr_t __init_memblock memblock_get_current_limit(void)
1889 return memblock.current_limit;
1892 static void __init_memblock memblock_dump(struct memblock_type *type)
1894 phys_addr_t base, end, size;
1895 enum memblock_flags flags;
1896 int idx;
1897 struct memblock_region *rgn;
1899 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
1901 for_each_memblock_type(idx, type, rgn) {
1902 char nid_buf[32] = "";
1904 base = rgn->base;
1905 size = rgn->size;
1906 end = base + size - 1;
1907 flags = rgn->flags;
1908 #ifdef CONFIG_NEED_MULTIPLE_NODES
1909 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1910 snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1911 memblock_get_region_node(rgn));
1912 #endif
1913 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1914 type->name, idx, &base, &end, &size, nid_buf, flags);
1918 void __init_memblock __memblock_dump_all(void)
1920 pr_info("MEMBLOCK configuration:\n");
1921 pr_info(" memory size = %pa reserved size = %pa\n",
1922 &memblock.memory.total_size,
1923 &memblock.reserved.total_size);
1925 memblock_dump(&memblock.memory);
1926 memblock_dump(&memblock.reserved);
1927 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1928 memblock_dump(&physmem);
1929 #endif
1932 void __init memblock_allow_resize(void)
1934 memblock_can_resize = 1;
1937 static int __init early_memblock(char *p)
1939 if (p && strstr(p, "debug"))
1940 memblock_debug = 1;
1941 return 0;
1943 early_param("memblock", early_memblock);
1945 static void __init __free_pages_memory(unsigned long start, unsigned long end)
1947 int order;
1949 while (start < end) {
1950 order = min(MAX_ORDER - 1UL, __ffs(start));
1952 while (start + (1UL << order) > end)
1953 order--;
1955 memblock_free_pages(pfn_to_page(start), start, order);
1957 start += (1UL << order);
1961 static unsigned long __init __free_memory_core(phys_addr_t start,
1962 phys_addr_t end)
1964 unsigned long start_pfn = PFN_UP(start);
1965 unsigned long end_pfn = min_t(unsigned long,
1966 PFN_DOWN(end), max_low_pfn);
1968 if (start_pfn >= end_pfn)
1969 return 0;
1971 __free_pages_memory(start_pfn, end_pfn);
1973 return end_pfn - start_pfn;
1976 static unsigned long __init free_low_memory_core_early(void)
1978 unsigned long count = 0;
1979 phys_addr_t start, end;
1980 u64 i;
1982 memblock_clear_hotplug(0, -1);
1984 for_each_reserved_mem_region(i, &start, &end)
1985 reserve_bootmem_region(start, end);
1988 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
1989 * because in some cases, e.g. when node 0 has no RAM installed,
1990 * low RAM will be on node 1.
1992 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
1993 NULL)
1994 count += __free_memory_core(start, end);
1996 return count;
1999 static int reset_managed_pages_done __initdata;
2001 void reset_node_managed_pages(pg_data_t *pgdat)
2003 struct zone *z;
2005 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2006 atomic_long_set(&z->managed_pages, 0);
2009 void __init reset_all_zones_managed_pages(void)
2011 struct pglist_data *pgdat;
2013 if (reset_managed_pages_done)
2014 return;
2016 for_each_online_pgdat(pgdat)
2017 reset_node_managed_pages(pgdat);
2019 reset_managed_pages_done = 1;
2023 * memblock_free_all - release free pages to the buddy allocator
2025 * Return: the number of pages actually released.
2027 unsigned long __init memblock_free_all(void)
2029 unsigned long pages;
2031 reset_all_zones_managed_pages();
2033 pages = free_low_memory_core_early();
2034 totalram_pages_add(pages);
2036 return pages;
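A minimal arch mem_init() along the lines described in the overview (a sketch; real implementations typically do more, e.g. set up highmem or print the memory banner):

void __init mem_init(void)
{
	/* release everything memblock still considers free to the buddy allocator */
	memblock_free_all();
}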
2039 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2041 static int memblock_debug_show(struct seq_file *m, void *private)
2043 struct memblock_type *type = m->private;
2044 struct memblock_region *reg;
2045 int i;
2046 phys_addr_t end;
2048 for (i = 0; i < type->cnt; i++) {
2049 reg = &type->regions[i];
2050 end = reg->base + reg->size - 1;
2052 seq_printf(m, "%4d: ", i);
2053 seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2055 return 0;
2057 DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2059 static int __init memblock_init_debugfs(void)
2061 struct dentry *root = debugfs_create_dir("memblock", NULL);
2063 debugfs_create_file("memory", 0444, root,
2064 &memblock.memory, &memblock_debug_fops);
2065 debugfs_create_file("reserved", 0444, root,
2066 &memblock.reserved, &memblock_debug_fops);
2067 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2068 debugfs_create_file("physmem", 0444, root, &physmem,
2069 &memblock_debug_fops);
2070 #endif
2072 return 0;
2074 __initcall(memblock_init_debugfs);
2076 #endif /* CONFIG_DEBUG_FS */