// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
extern char _start[];
#define DTB_EARLY_BASE_VA	PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;
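/*
 * Page-table allocator hooks. The function pointers are switched as boot
 * progresses: early (MMU off), fixmap-based (MMU on, linear map not yet
 * final), then the normal page allocator once paging is fully up.
 */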
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};
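/* Highest physical address usable by 32-bit (ZONE_DMA32) devices. */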
static phys_addr_t dma32_phys_limit __ro_after_init;
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}
static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}
#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */
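/*
 * Carve up the physical memory reported by the device tree: clamp it to
 * what the linear map can cover, reserve the kernel image, initrd and
 * DTB, and compute the DMA32 limit consumed by zone_sizes_init().
 */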
void __init setup_bootmem(void)
{
	phys_addr_t mem_start = 0;
	phys_addr_t start, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	u64 i;

	/* Find the memory region containing the kernel */
	for_each_mem_range(i, &start, &end) {
		phys_addr_t size = end - start;

		if (!mem_start)
			mem_start = start;
		if (start <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(size == 0);
	}

	/*
	 * The maximal physical memory size is -PAGE_OFFSET.
	 * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed
	 * as it is unusable by the kernel.
	 */
	memblock_enforce_memory_limit(-PAGE_OFFSET);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = max_pfn;
	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses.
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;
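/* Linear-map offset (va = pa + va_pa_offset) and the kernel's base PFN. */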
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
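/*
 * Install a fixmap entry for @idx, or clear it when pgprot_val(prot) is
 * zero. Fixmap slots are backed by the statically allocated fixmap_pte.
 */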
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}
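/*
 * pa -> va conversion for page-table pages comes in three flavours,
 * matching the boot phase: identity (MMU still off), via a fixmap slot
 * (MMU on, linear map not final), and plain __va() once paging is up.
 */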
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}
static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}
static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}
static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}
static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}
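/*
 * Fill a single PTE for va -> pa. Only PAGE_SIZE granules are valid at
 * this level; an already-present entry is left untouched.
 */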
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}
#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}
static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}
static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	uintptr_t pmd_num;

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}
static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}
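/*
 * Map va -> pa at PMD level: install a leaf entry when sz == PMD_SIZE,
 * otherwise descend into (allocating on demand) the PTE table and
 * delegate to create_pte_mapping().
 */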
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif
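/*
 * Top-level mapping helper: installs a leaf PGD entry for PGDIR_SIZE
 * mappings, otherwise walks down one level via the pgd_next macros above.
 */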
void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}
/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, pa, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif
	/*
	 * Setup early PGD covering the entire kernel, which allows
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);
#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
	/* Create two consecutive PMD mappings for FDT early scan */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else
	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#endif
	dtb_early_pa = dtb_pa;
	/*
	 * Boot-time fixmap can only handle PMD_SIZE mappings, so the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}
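/*
 * Second-stage page-table setup, run once memblock knows the final
 * memory layout: rebuild the kernel mappings in swapper_pg_dir, switch
 * satp over to it, and hand page-table allocation to the page allocator.
 */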
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;
	/*
	 * The MMU is enabled at this point, but the page-table setup is not
	 * complete yet, so the fixmap-based page-table alloc functions must
	 * be used.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);
	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}
	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();
594 local_flush_tlb_all();
596 /* generic page allocation functions must be used to setup page table */
597 pt_ops
.alloc_pte
= alloc_pte_late
;
598 pt_ops
.get_pte_virt
= get_pte_virt_late
;
599 #ifndef __PAGETABLE_PMD_FOLDED
600 pt_ops
.alloc_pmd
= alloc_pmd_late
;
601 pt_ops
.get_pmd_virt
= get_pmd_virt_late
;
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fall back to the first available DTB */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */
#ifdef CONFIG_STRICT_KERNEL_RWX
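/*
 * Enforce W^X: make the text sections read-only and everything from init
 * data onward non-executable. The rodata region is made read-only later,
 * in mark_rodata_ro().
 */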
void protect_kernel_text_data(void)
{
	unsigned long text_start = (unsigned long)_start;
	unsigned long init_text_start = (unsigned long)__init_text_begin;
	unsigned long init_data_start = (unsigned long)__init_data_begin;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
	/* rodata section is marked readonly in mark_rodata_ro */
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
{
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif
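/*
 * Late MM bring-up called from setup_arch(): finalize the kernel page
 * tables and populate the zone PFN limits.
 */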
void __init paging_init(void)
{
	setup_vm_final();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif