/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by the dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
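
/*
 * Illustrative examples (sizes/addresses hypothetical): "crashkernel=512M"
 * lets the code above choose a 2MB-aligned base below
 * ARCH_LOW_ADDRESS_LIMIT, while "crashkernel=512M@0x60000000" requests an
 * explicit base that must pass the memory/reserved/alignment checks.
 */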

static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;
	struct page *page;

	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of the hibernation image, all the pages are
	 * marked as Reserved initially.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
			addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		SetPageReserved(page);
	}
#endif
}
#else
static void __init reserve_crashkernel(void)
{
}

static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}
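
/*
 * A sketch of the /chosen property consumed above, with hypothetical cell
 * values (assuming two address cells and two size cells):
 *
 *	chosen {
 *		linux,elfcorehdr = <0x0 0x9fe00000 0x0 0x10000>;
 *	};
 */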

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a dump
 * capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
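
/*
 * Worked example with hypothetical values: if DRAM starts at
 * 0x8080000000, the offset computed above is 0x8000000000, so the
 * ZONE_DMA32 limit becomes min(0x8100000000, memblock_end_of_DRAM()),
 * i.e. the first 4GB window reachable by 32-bit masks via a DMA offset.
 */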

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA32] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
				memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
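
/*
 * Illustrative use (size hypothetical): booting with "mem=512M" caps
 * usable RAM at 512 MB; the limit is applied in arm64_memblock_init()
 * below via memblock_mem_limit_remove_map().
 */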

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}
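
/*
 * A sketch of the /chosen property consumed above, with hypothetical cell
 * values (assuming two address cells and two size cells); a crash dump
 * kernel restricts itself to the given window:
 *
 *	chosen {
 *		linux,usable-memory-range = <0x0 0xa0000000 0x0 0x10000000>;
 *	};
 */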

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
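
	/*
	 * Worked example, assuming VA_BITS == 48: PAGE_OFFSET is
	 * 0xffff800000000000, so linear_region_size is 0x0000800000000000,
	 * i.e. BIT(47). Linear map addresses have bit 47 set, while
	 * kernel/module/vmalloc addresses below PAGE_OFFSET have it clear.
	 */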

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd becoming inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range = range / ARM64_MEMSTART_ALIGN + 1;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
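
	/*
	 * Example of the math above, with hypothetical numbers: if the
	 * linear region is 128 TB and DRAM spans 16 GB, nearly 128 TB of
	 * slack remains. memstart_offset_seed is a 16-bit KASLR seed, so
	 * memstart_addr is lowered by roughly seed/65536 of that slack in
	 * ARM64_MEMSTART_ALIGN units; a mid-range seed of 0x8000 shifts
	 * the linear map down by about half the slack.
	 */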

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}
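
/*
 * Example with hypothetical numbers (4K pages, 64-byte struct page): a hole
 * of 1024 absent PFNs spans 64 KB of memmap; after rounding the start up
 * and the end down to page boundaries, the whole 4K pages in between (up
 * to sixteen here) are returned to the boot allocator.
 */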

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	kexec_reserve_crashkres_pages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly when sizing the VMEMMAP array.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
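
/*
 * Illustrative use: adding the bare "keepinitrd" token to the kernel
 * command line sets keep_initrd, so free_initrd_mem() leaves the initrd
 * image in place instead of releasing it to the page allocator.
 */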

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}

	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);