// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

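/*
 * Legacy ATAG boot support: ATAG_INITRD carries a virtual initrd address
 * (converted to physical here), while ATAG_INITRD2 already carries a
 * physical address.
 */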
#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

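/*
 * Derive the PFN bounds used for zone setup: 'min' is the first page of
 * DRAM, 'max_low' the top of lowmem (memblock's current limit) and
 * 'max_high' the last page of DRAM.
 */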
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

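/*
 * Size the DMA zone from the machine descriptor; platforms that do not
 * declare a dma_zone_size get an unrestricted 32-bit DMA limit.
 */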
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

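/*
 * Report the zone boundaries to the core mm: ZONE_DMA (if configured) is
 * capped at arm_dma_pfn_limit, ZONE_NORMAL covers the remaining lowmem and
 * ZONE_HIGHMEM anything above it.
 */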
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
				   unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

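/*
 * A PFN is valid only if it round-trips through __pfn_to_phys() and lies
 * in memory that memblock will actually map.
 */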
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif

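/*
 * arm_memblock_steal() permanently removes memory from the system. It is
 * only legal while arm_memblock_steal_permitted is true, i.e. from a
 * machine's ->reserve() callback invoked by arm_memblock_init() below.
 * Illustrative (hypothetical) platform use:
 *	fb_base = arm_memblock_steal(SZ_8M, SZ_1M);
 */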
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

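/*
 * Read this CPU's cache type register, derive its I-cache line size and
 * track the smallest value seen so cache maintenance uses a safe stride;
 * secondary CPUs whose line size differs trigger an informational message.
 */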
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

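/*
 * Finish memblock setup: compute the PFN limits, memtest the lowmem range
 * if requested, initialise the sparse memory map and hand the zone layout
 * to the core mm.
 */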
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem page free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	swiotlb_init(1);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

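/*
 * Apply (set) or revert (!set) the protection bits of each entry in a
 * section_perm table to the given mm, one section-sized PMD at a time.
 * Entries that are not SECTION_SIZE aligned are skipped with an error.
 */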
static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early intended to be called only through stop_machine
 * framework and executed by only one CPU while all other CPUs will spin and
 * wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

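/*
 * Temporarily drop, then restore, the read-only protection on kernel
 * text/rodata in the current mm. Both are no-ops until mark_rodata_ro()
 * has run.
 */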
void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif