arch/arm64/mm/init.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/math.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>
#include <linux/kmemleak.h>
#include <linux/execmem.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/rsi.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
 * otherwise it is empty.
 */
phys_addr_t __ro_after_init arm64_dma_phys_limit;

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule).
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
#endif
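
/*
 * Illustrative values, derived from the granule definitions rather than
 * spelled out here: PUD_SHIFT is 30 with 4K pages and CONT_PMD_SHIFT is 30
 * with 16K pages (32 contiguous 32M PMDs), so both round down to a 1 GiB
 * boundary; PMD_SHIFT is 29 with 64K pages, i.e. a 512 MiB boundary.
 */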

/*
 * sparsemem vmemmap imposes an additional requirement on the alignment of
 * memstart_addr, due to the fact that the base of the vmemmap region
 * has a direct correspondence, and needs to appear sufficiently aligned
 * in the virtual address space.
 */
#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif

static void __init arch_reserve_crashkernel(void)
{
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
	if (ret)
		return;

	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}
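
/*
 * Illustrative command lines (standard crashkernel= syntax, not specific
 * to this file): "crashkernel=256M" lets parse_crashkernel() pick a base
 * automatically, "crashkernel=256M@0x90000000" pins it, and
 * "crashkernel=1G,high" requests the reservation above 4G along with a
 * default low region for DMA.
 */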

static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
{
	/*
	 * Information we get from firmware (e.g. DT dma-ranges) describes DMA
	 * bus constraints. Devices using DMA might have their own limitations.
	 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
	 * DMA zone on platforms that have RAM there.
	 */
	if (memblock_start_of_DRAM() < U32_MAX)
		zone_limit = min(zone_limit, U32_MAX);
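
	/*
	 * zone_limit is an inclusive address mask (e.g. DMA_BIT_MASK(32))
	 * while memblock_end_of_DRAM() is exclusive; comparing inclusive
	 * addresses and adding 1 afterwards avoids overflow when the limit
	 * sits at the top of the physical address range.
	 */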
	return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
}

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	phys_addr_t __maybe_unused acpi_zone_dma_limit;
	phys_addr_t __maybe_unused dt_zone_dma_limit;
	phys_addr_t __maybe_unused dma32_phys_limit =
		max_zone_phys(DMA_BIT_MASK(32));

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
	dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
	zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	free_area_init(max_zone_pfns);
}
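
/*
 * Example layout (assumed platform, for illustration): on a Raspberry Pi 4,
 * where firmware describes a 30-bit DMA constraint, the above yields
 * ZONE_DMA covering the first 1 GiB, ZONE_DMA32 the rest of the 32-bit
 * range, and ZONE_NORMAL everything above 4 GiB.
 */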

int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);

static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
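
/*
 * For illustration (standard "mem=" semantics, not specific to arm64):
 * booting with "mem=512M" caps usable RAM at 512 MiB; the limit is applied
 * in arm64_memblock_init() below.
 */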

void __init arm64_memblock_init(void)
{
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's simply
	 * limit the kernel's linear map to 51 bits as well if we detect this
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
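	/*
	 * For example (values follow from _PAGE_OFFSET(), not stated in the
	 * original): on hardware without LVA support, vabits_actual is 48,
	 * so this subtracts _PAGE_OFFSET(48) - _PAGE_OFFSET(52) = 2^52 - 2^48,
	 * shifting the linear map of DRAM up into the 48-bit addressable
	 * part of the region.
	 */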
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(vabits_actual) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd becoming inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			 "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_add(base, size);
			memblock_clear_nomap(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
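		/*
		 * memstart_offset_seed is a 16-bit KASLR seed, so
		 * (range * seed) >> 16 below picks an offset distributed
		 * over [0, range) in units of ARM64_MEMSTART_ALIGN.
		 */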
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();

	/*
	 * must be done after arch_numa_init() which calls numa_init() to
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init();

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	arch_reserve_crashkernel();

	memblock_dump_all();
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned int flags = SWIOTLB_VERBOSE;
	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);

	if (is_realm_world()) {
		swiotlb = true;
		flags |= SWIOTLB_FORCE;
	}

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
		/*
		 * If no bouncing is needed for ZONE_DMA, reduce the swiotlb
		 * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
		 */
		unsigned long size =
			DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
		swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
		swiotlb = true;
	}

	swiotlb_init(swiotlb, flags);
	swiotlb_update_mem_attributes();

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);
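
	/*
	 * 128 pages is at most 2 MiB of RAM with 16K pages (8 MiB with 64K),
	 * so the check below only fires on genuinely tiny systems.
	 */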
	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	void *lm_init_begin = lm_alias(__init_begin);
	void *lm_init_end = lm_alias(__init_end);

	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));

	/* Delete __init region from memblock.reserved. */
	memblock_free(lm_init_begin, lm_init_end - lm_init_begin);

	free_reserved_area(lm_init_begin, lm_init_end,
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}

#ifdef CONFIG_EXECMEM
static u64 module_direct_base __ro_after_init = 0;
static u64 module_plt_base __ro_after_init = 0;

/*
 * Choose a random page-aligned base address for a window of 'size' bytes which
 * entirely contains the interval [start, end - 1].
 */
static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
{
	u64 max_pgoff, pgoff;

	if ((end - start) >= size)
		return 0;

	max_pgoff = (size - (end - start)) / PAGE_SIZE;
	pgoff = get_random_u32_inclusive(0, max_pgoff);
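
	/*
	 * Since pgoff * PAGE_SIZE <= size - (end - start), the window
	 * [start - pgoff * PAGE_SIZE, start - pgoff * PAGE_SIZE + size)
	 * satisfies base <= start and base + size >= end by construction.
	 */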
	return start - pgoff * PAGE_SIZE;
}

/*
 * Modules may directly reference data and text anywhere within the kernel
 * image and other modules. References using PREL32 relocations have a +/-2G
 * range, and so we need to ensure that the entire kernel image and all modules
 * fall within a 2G window such that these are always within range.
 *
 * Modules may directly branch to functions and code within the kernel text,
 * and to functions and code within other modules. These branches will use
 * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
 * that the entire kernel text and all module text falls within a 128M window
 * such that these are always within range. With PLTs, we can expand this to a
 * 2G window.
 *
 * We chose the 128M region to surround the entire kernel image (rather than
 * just the text) as using the same bounds for the 128M and 2G regions ensures
 * by construction that we never select a 128M region that is not a subset of
 * the 2G region. For very large and unusual kernel configurations this means
 * we may fall back to PLTs where they could have been avoided, but this keeps
 * the logic significantly simpler.
 */
static int __init module_init_limits(void)
{
	u64 kernel_end = (u64)_end;
	u64 kernel_start = (u64)_text;
	u64 kernel_size = kernel_end - kernel_start;

	/*
	 * The default modules region is placed immediately below the kernel
	 * image, and is large enough to use the full 2G relocation range.
	 */
	BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
	BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
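
	/*
	 * Without KASLR the image sits at the top of that default region
	 * (KIMAGE_VADDR == MODULES_END), so a window ending at kernel_end
	 * covers both the kernel and the module space below it.
	 */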
	if (!kaslr_enabled()) {
		if (kernel_size < SZ_128M)
			module_direct_base = kernel_end - SZ_128M;
		if (kernel_size < SZ_2G)
			module_plt_base = kernel_end - SZ_2G;
	} else {
		u64 min = kernel_start;
		u64 max = kernel_end;

		if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
			pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
		} else {
			module_direct_base = random_bounding_box(SZ_128M, min, max);
			if (module_direct_base) {
				min = module_direct_base;
				max = module_direct_base + SZ_128M;
			}
		}

		module_plt_base = random_bounding_box(SZ_2G, min, max);
	}

	pr_info("%llu pages in range for non-PLT usage\n",
		module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
	pr_info("%llu pages in range for PLT usage\n",
		module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);

	return 0;
}

static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long fallback_start = 0, fallback_end = 0;
	unsigned long start = 0, end = 0;

	module_init_limits();

	/*
	 * Where possible, prefer to allocate within direct branch range of the
	 * kernel such that no PLTs are necessary.
	 */
	if (module_direct_base) {
		start = module_direct_base;
		end = module_direct_base + SZ_128M;

		if (module_plt_base) {
			fallback_start = module_plt_base;
			fallback_end = module_plt_base + SZ_2G;
		}
	} else if (module_plt_base) {
		start = module_plt_base;
		end = module_plt_base + SZ_2G;
	}
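
	/*
	 * execmem falls back to [fallback_start, fallback_end) when an
	 * allocation from [start, end) fails; in that case module PLTs
	 * bridge the longer branches.
	 */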
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= start,
				.end	= end,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
				.fallback_start	= fallback_start,
				.fallback_end	= fallback_end,
			},
			[EXECMEM_KPROBES] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= PAGE_KERNEL_ROX,
				.alignment = 1,
			},
			[EXECMEM_BPF] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */