// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>
#include <linux/execmem.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

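/*
 * Clear bits in the cached copy of the CP15 control register (cr_alignment)
 * and return the updated value.
 */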
#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
        cr_alignment = cr_alignment & ~mask;
        return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
        pr_warn("ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

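/*
 * Derive the page frame number limits from the memblock view of memory:
 * *min is the first PFN of DRAM, *max_low the last lowmem PFN (bounded by
 * the current memblock limit) and *max_high the last PFN of DRAM, including
 * any highmem.
 */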
static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        *max_low = PFN_DOWN(memblock_get_current_limit());
        *min = PFN_UP(memblock_start_of_DRAM());
        *max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

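/*
 * Record the machine's DMA zone size (if it declares one) and derive the
 * bus address and PFN limits used when sizing ZONE_DMA and reserving CMA.
 */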
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                arm_dma_zone_size = mdesc->dma_zone_size;
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
        arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

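/*
 * Hand the PFN limits computed by find_limits() to the core mm: ZONE_DMA
 * (when configured) is capped at arm_dma_pfn_limit, ZONE_NORMAL covers the
 * rest of lowmem and ZONE_HIGHMEM takes everything above max_low.
 */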
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
        max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
        max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
        max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
        free_area_init(max_zone_pfn);
}

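/*
 * With HAVE_ARCH_PFN_VALID the memory map can have holes: a PFN is valid
 * only if it round-trips through __pfn_to_phys() and its pageblock overlaps
 * memory registered with memblock.
 */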
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        phys_addr_t addr = __pfn_to_phys(pfn);
        unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

        if (__phys_to_pfn(addr) != pfn)
                return 0;

        /*
         * If the address is less than pageblock_size bytes away from a
         * present memory chunk, there will still be a memory map entry
         * for it, because we round the freed memory map to pageblock
         * boundaries.
         */
        if (memblock_overlaps_region(&memblock.memory,
                                     ALIGN_DOWN(addr, pageblock_size),
                                     pageblock_size))
                return 1;

        return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

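/*
 * "Stealing" permanently removes memory from memblock so it is never mapped
 * or handed to the page allocator. It is only permitted while the early
 * reservations are still being made, i.e. before arm_memblock_init() has
 * finished.
 */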
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_phys_alloc(size, align);
        if (!phys)
                panic("Failed to steal %pa bytes at %pS\n",
                      &size, (void *)_RET_IP_);

        memblock_phys_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}

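/*
 * Read the I-cache line size from the cp15 cache type register and keep the
 * smallest value seen across CPUs, warning when a secondary CPU disagrees
 * with the boot CPU.
 */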
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
        u32 size, ctr;

        asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

        size = 1 << ((ctr & 0xf) + 2);
        if (cpuid != 0 && icache_size != size)
                pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
                        cpuid);
        if (icache_size > size)
                icache_size = size;
}
#endif

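/*
 * Reserve everything that must stay out of the page allocator (kernel image,
 * initrd, early page tables, platform and devicetree reservations, CMA);
 * once this is done, arm_memblock_steal() is no longer permitted.
 */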
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
        /* Register the kernel text, kernel data and initrd with memblock. */
        memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

        reserve_initrd_mem();

        arm_mm_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        early_init_fdt_scan_reserved_mem();

        /* reserve memory for DMA contiguous allocations */
        dma_contiguous_reserve(arm_dma_limit);

        arm_memblock_steal_permitted = false;
}

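/*
 * Runs once the memblock layout is final: establish the global PFN limits,
 * optionally memtest lowmem, build the sparse memory model and size the
 * zones.
 */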
void __init bootmem_init(void)
{
        memblock_allow_resize();

        find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

        early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
                      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

        /*
         * sparse_init() tries to allocate memory from memblock, so it must
         * be done after the fixed reservations.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init needs the sparse mem_map
         * arrays initialized by sparse_init() for memmap_init_zone(),
         * otherwise all PFNs are invalid.
         */
        zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;

        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

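/*
 * Hand every free memblock range that lies above the lowmem limit to the
 * page allocator as highmem pages.
 */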
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        phys_addr_t range_start, range_end;
        u64 i;

        /* set highmem page free */
        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
                                &range_start, &range_end, NULL) {
                unsigned long start = PFN_UP(range_start);
                unsigned long end = PFN_DOWN(range_end);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                for (; start < end; start++)
                        free_highmem_page(pfn_to_page(start));
        }
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
        swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
#endif

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

        /* this will put all unused low memory onto the freelists */
        memblock_free_all();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

        free_highpages();

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
        const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
        pmdval_t prot;
        pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
        /* Make page tables, etc. before _stext RW (set NX). */
        {
                .name   = "pre-text NX",
                .start  = PAGE_OFFSET,
                .end    = (unsigned long)_stext,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make init RW (set NX). */
        {
                .name   = "init NX",
                .start  = (unsigned long)__init_begin,
                .end    = (unsigned long)_sdata,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make rodata NX (set RO in ro_perms below). */
        {
                .name   = "rodata NX",
                .start  = (unsigned long)__start_rodata_section_aligned,
                .end    = (unsigned long)__init_begin,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
};

static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
                .name   = "text/rodata RO",
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
                .mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
                .prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .clear  = PMD_SECT_AP_WRITE,
#endif
        },
};

/*
 * Update section permissions only for the current mm (sections are copied
 * into each mm). During startup this is the init_mm. It is only safe to
 * call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
                                  pmdval_t prot, struct mm_struct *mm)
{
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
        if (addr & SECTION_SIZE)
                pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
        else
                pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
        flush_pmd_entry(pmd);
        local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
        if (cpu_architecture() < CPU_ARCH_ARMv6)
                return false;

        return !!(get_cr() & CR_XP);
}

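/*
 * Apply (set) or revert (clear) the protection bits of each entry in perms[]
 * on the section mappings of @mm, skipping any entry whose start or end is
 * not section-aligned.
 */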
static void set_section_perms(struct section_perm *perms, int n, bool set,
                              struct mm_struct *mm)
{
        size_t i;
        unsigned long addr;

        if (!arch_has_strict_perms())
                return;

        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
                        pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
                               perms[i].name, perms[i].start, perms[i].end,
                               SECTION_SIZE);
                        continue;
                }

                for (addr = perms[i].start;
                     addr < perms[i].end;
                     addr += SECTION_SIZE)
                        section_update(addr, perms[i].mask,
                                       set ? perms[i].prot : perms[i].clear, mm);
        }
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, executed by a single CPU while all other CPUs
 * spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
        struct task_struct *t, *s;

        for_each_process(t) {
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
                        if (s->mm)
                                set_section_perms(perms, n, true, s->mm);
        }
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
        update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
        return 0;
}

static void fix_kernmem_perms(void)
{
        stop_machine(__fix_kernmem_perms, NULL, NULL);
}

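/*
 * mark_rodata_ro() is invoked by generic init code once the kernel is far
 * enough along to lock down text and rodata; like fix_kernmem_perms() it
 * funnels through stop_machine() so the permission change is applied
 * atomically on all CPUs.
 */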
static int __mark_rodata_ro(void *unused)
{
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
        return 0;
}

void mark_rodata_ro(void)
{
        stop_machine(__mark_rodata_ro, NULL, NULL);
        arm_debug_checkwx();
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
        fix_kernmem_perms();

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                free_initmem_default(-1);
}

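/*
 * The initrd region is rounded out to whole pages only when it matches the
 * boundaries recorded at boot, then poisoned and returned to the page
 * allocator.
 */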
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start == initrd_start)
                start = round_down(start, PAGE_SIZE);
        if (end == initrd_end)
                end = round_up(end, PAGE_SIZE);

        poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

#ifdef CONFIG_EXECMEM

#ifdef CONFIG_XIP_KERNEL
/*
 * The XIP kernel text is mapped in the module area for modules and some
 * other stuff to work without any indirect relocations. MODULES_VADDR is
 * redefined here and not in asm/memory.h to avoid recompiling the whole
 * kernel when CONFIG_XIP_KERNEL is turned on/off.
 */
#undef MODULES_VADDR
#define MODULES_VADDR   (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
#endif

#ifdef CONFIG_MMU
static struct execmem_info execmem_info __ro_after_init;

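/*
 * Tell the execmem allocator where executable allocations (modules and
 * friends) should live: the module area, with an optional fallback to the
 * vmalloc area when ARM_MODULE_PLTS can bridge the longer branch distances.
 */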
struct execmem_info __init *execmem_arch_setup(void)
{
        unsigned long fallback_start = 0, fallback_end = 0;

        if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) {
                fallback_start = VMALLOC_START;
                fallback_end = VMALLOC_END;
        }

        execmem_info = (struct execmem_info){
                .ranges = {
                        [EXECMEM_DEFAULT] = {
                                .start          = MODULES_VADDR,
                                .end            = MODULES_END,
                                .pgprot         = PAGE_KERNEL_EXEC,
                                .alignment      = 1,
                                .fallback_start = fallback_start,
                                .fallback_end   = fallback_end,
                        },
                },
        };

        return &execmem_info;
}
#endif /* CONFIG_MMU */

#endif /* CONFIG_EXECMEM */