/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmap`` - describes the actual physical memory regardless of
 *   the possible restrictions; the ``physmap`` type is only available
 *   on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The memory types are nicely wrapped with
 * :c:type:`struct memblock`. This structure is statically initialized
 * at build time. The region arrays for the "memory" and "reserved"
 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
 * "physmap" type to %INIT_PHYSMEM_REGIONS.
 * The :c:func:`memblock_allow_resize` enables automatic resizing of
 * the region arrays during addition of new regions. This feature
 * should be used with care so that memory allocated for the region
 * array will not overlap with areas that should be reserved, for
 * example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using :c:func:`memblock_add` or
 * :c:func:`memblock_add_node` functions. The first function does not
 * assign the region to a NUMA node and it is appropriate for UMA
 * systems. Yet, it is possible to use it on NUMA systems as well and
 * assign the region to a NUMA node later in the setup process using
 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * :c:func:`memblock_phys_alloc*` - these functions return the
 *   **physical** address of the allocated memory
 * * :c:func:`memblock_alloc*` - these functions return the **virtual**
 *   address of the allocated memory.
 *
 * Note, that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of :c:func:`memblock_alloc_internal` and
 * :c:func:`memblock_alloc_range_nid` functions for more elaborate
 * description.
 *
 * As the system boot progresses, the architecture specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
 * allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures will be discarded after the system
 * initialization completes.
 */
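
/*
 * A minimal usage sketch of the flow described above (illustrative only,
 * not called anywhere in this file; the physical addresses and sizes are
 * made-up placeholders):
 *
 *	void __init example_early_setup(void)
 *	{
 *		void *ptr;
 *
 *		// describe 1G of RAM at 0x80000000, then carve out a
 *		// firmware-owned megabyte
 *		memblock_add(0x80000000, SZ_1G);
 *		memblock_reserve(0x80000000, SZ_1M);
 *
 *		// early allocation; returns a zeroed virtual address or NULL
 *		ptr = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
 *		if (!ptr)
 *			panic("%s: allocation failed\n", __func__);
 *	}
 */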
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation failed, will try to allocate memory top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * it happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
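
/*
 * Usage sketch (illustrative only, with made-up addresses): registering
 * memory on a NUMA machine either directly with a node id, or in two steps
 * via memblock_set_node(), as described in the overview above:
 *
 *	memblock_add_node(0x0, SZ_2G, 0);		// node 0
 *	memblock_add_node(0x100000000ULL, SZ_2G, 1);	// node 1
 *
 *	// equivalent two-step variant for node 1:
 *	memblock_add(0x100000000ULL, SZ_2G);
 *	memblock_set_node(0x100000000ULL, SZ_2G, &memblock.memory, 1);
 */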
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}
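
/*
 * Sketch of how the flag helpers above are typically used by early platform
 * code (addresses and sizes are placeholders). Marking a range as mirrored
 * also makes choose_memblock_flags() prefer mirrored regions for later
 * allocations; marking it MEMBLOCK_NOMAP keeps it out of the linear map
 * while still tracking it as memory:
 *
 *	memblock_mark_mirror(0x80000000, SZ_256M);
 *	memblock_mark_nomap(0xf0000000, SZ_16M);	// e.g. firmware-owned
 *	...
 *	memblock_clear_nomap(0xf0000000, SZ_16M);
 */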
/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
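
/*
 * Callers normally reach __next_mem_range() through the iteration macros
 * rather than directly. An illustrative walk over free memory (the pr_info()
 * is just for demonstration):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 */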
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
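
/*
 * Illustrative use of the iterator above through its wrapper macro
 * for_each_mem_pfn_range(); this mirrors how page init code walks memory
 * in PFN units (the pr_info() is only for demonstration):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("pfns [%lx-%lx) on node %d\n",
 *			start_pfn, end_pfn, nid);
 */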
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory the
 * allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
 * allocated boot memory block, so that it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

	if (end > memblock.current_limit)
		end = memblock.current_limit;

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
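
/*
 * Usage sketch for the physical-address allocators (sizes and bounds are
 * placeholders):
 *
 *	phys_addr_t pa;
 *
 *	// anywhere accessible, preferably on node 1
 *	pa = memblock_phys_alloc_try_nid(SZ_64K, SZ_64K, 1);
 *
 *	// constrained to the low 4G
 *	pa = memblock_phys_alloc_range(SZ_64K, SZ_64K, 0, SZ_4G);
 *	if (!pa)
 *		pr_warn("low allocation failed\n");
 */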
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}
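
/*
 * The practical difference between the two variants above, in sketch form
 * (the variable names are placeholders): the _raw variant skips the
 * memset() and only poisons the memory in some debug configurations, so
 * the caller must initialize every byte itself; this variant hands back
 * zeroed memory:
 *
 *	// zeroed, ready to use
 *	map = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
 *				     MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *
 *	// not zeroed; caller initializes it
 *	map = memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 */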
/**
 * __memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
						&start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}
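
/*
 * Sketch of the address queries built on memblock_search() (the address is
 * a placeholder):
 *
 *	phys_addr_t pa = 0x80001000;
 *
 *	if (memblock_is_memory(pa) && !memblock_is_reserved(pa))
 *		pr_info("%pa is free RAM\n", &pa);
 *	if (!memblock_is_map_memory(pa))
 *		pr_info("%pa is not in the linear map\n", &pa);
 */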
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases (e.g. when Node0 has no RAM installed)
	 * low RAM will be on Node1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}
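
/*
 * Sketch of the typical caller: an architecture's mem_init() hands all free
 * memory to the buddy allocator at the end of early boot. Simplified from
 * what real architectures do around this call:
 *
 *	void __init mem_init(void)
 *	{
 *		...
 *		memblock_free_all();
 *		mem_init_print_info(NULL);
 *	}
 */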
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */