/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmap`` - describes the actual physical memory regardless of
 *   the possible restrictions; the ``physmap`` type is only available
 *   on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The memory types are nicely wrapped with
 * :c:type:`struct memblock`. This structure is statically initialized
 * at build time. The region arrays for the "memory" and "reserved"
 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
 * "physmap" type to %INIT_PHYSMEM_REGIONS.
 * The :c:func:`memblock_allow_resize` enables automatic resizing of
 * the region arrays during addition of new regions. This feature
 * should be used with care so that memory allocated for the region
 * array will not overlap with areas that should be reserved, for
 * example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using :c:func:`memblock_add` or
 * :c:func:`memblock_add_node` functions. The first function does not
 * assign the region to a NUMA node and it is appropriate for UMA
 * systems. Yet, it is possible to use it on NUMA systems as well and
 * assign the region to a NUMA node later in the setup process using
 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * performs such an assignment directly.
 *
 * Once memblock is setup the memory can be allocated using either
 * memblock or bootmem APIs.
 *
 * As the system boot progresses, the architecture specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
 * allocator.
 *
 * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
 * memblock data structures will be discarded after the system
 * initialization completes.
 */

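/*
 * A minimal sketch of a typical early setup sequence. The base address
 * and the initrd variables below are illustrative assumptions, not
 * taken from any real board support code:
 *
 *	memblock_add(0x80000000, SZ_1G);		// register RAM
 *	memblock_reserve(initrd_start, initrd_size);	// protect initrd
 *	memblock_allow_resize();
 *	buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);	// early allocation
 */
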
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation failed, will try to allocate memory top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * that happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

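/*
 * Worked example (illustrative addresses): if "memory" already holds
 * [0x1000, 0x3000) and memblock_add_range() is called with
 * [0x2000, 0x5000), the overlap leaves the existing region untouched,
 * only [0x3000, 0x5000) is inserted, and the two compatible neighbours
 * are then merged into a single [0x1000, 0x5000) region.
 */
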
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

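/*
 * Worked example (illustrative addresses): isolating [0x2000, 0x3000)
 * from a single region [0x1000, 0x4000) splits it into [0x1000, 0x2000),
 * [0x2000, 0x3000) and [0x3000, 0x4000); *start_rgn and *end_rgn then
 * delimit the middle region so that it can be removed or reflagged
 * without touching its neighbours.
 */
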
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t found;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					enum memblock_flags flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, enum memblock_flags flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_phys_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_phys_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	enum memblock_flags flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align) {
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}

/**
 * memblock_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr) {
		memset(ptr, 0, size);
		return ptr;
	}

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa\n",
	      __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);
	return NULL;
}

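/*
 * A minimal usage sketch, not taken from any caller in the tree; "size"
 * and "nid" are placeholders. A panicking early-boot allocation with a
 * node preference that may legitimately fall back to other nodes:
 *
 *	table = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
 *				       MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *
 * If the buffer later turns out to be unneeded before the page
 * allocator is up, it can be returned with
 * __memblock_free_early(__pa(table), size).
 */
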
/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pF\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pF\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

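/*
 * Worked example (illustrative): freeing PFNs [5, 16) proceeds as
 * PFN 5 (order 0), PFN 6 (order 1), PFN 8 (order 3): each block is the
 * largest power-of-two run that is both naturally aligned at @start
 * and still fits below @end.
 */
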
static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, such as when Node0 has no RAM installed,
	 * low RAM will be on Node1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages += pages;

	return pages;
}

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */