// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC mm/init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

int mem_init_done;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfn);
}

extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	phys_addr_t start, end;
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	u64 i;
	/* These mark extents of read-only kernel pages...
	 * ...from vmlinux.lds.S
	 */

	v = PAGE_OFFSET;

	for_each_mem_range(i, &start, &end) {
		p = (u32) start & PAGE_MASK;
		e = (u32) end;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;
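			/*
			 * On this port the p4d/pud/pmd levels are folded,
			 * so the offset helpers below all land back on the
			 * same pgd entry; the sanity check further down
			 * asserts exactly that.
			 */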
			p4e = p4d_offset(pge, v);
			pue = pud_offset(p4e, v);
			pme = pmd_offset(pue, v);

			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for "
				      "two-level page tables", __func__);
			}

			/* Alloc one page for holding PTE's... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
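			/* Hook the new PTE page into the (folded) pgd slot;
			 * _KERNPG_TABLE supplies the kernel page-table flags.
			 */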
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTE'S */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       start, end);
	}
}

void __init paging_init(void)
{
	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 *  switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

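	/*
	 * map_ram() built the kernel's linear mapping in swapper_pg_dir
	 * above, so the new TLB miss handlers installed below have a
	 * complete page table to walk.
	 */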
	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

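		/*
		 * 0x900 and 0xa00 are the OR1K DTLB-miss and ITLB-miss
		 * exception vector offsets; __va() turns them into kernel
		 * virtual addresses so the vectors can be patched in place.
		 */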
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

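		/*
		 * The word stored is the PC-relative branch offset counted
		 * in instructions; since the OR1K l.j opcode is 0x0, the
		 * bare offset doubles as a valid "l.j <handler>" instruction.
		 */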
		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

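	/*
	 * SPR_ICBIR is the instruction-cache block-invalidate register;
	 * each write above drops the cache line covering a patched vector
	 * so the CPU refetches the new l.j instructions.
	 */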
	/* The new TLB miss handlers and kernel page tables are now in place.
	 * Make sure that page flags get updated for all pages in TLB by
	 * flushing the TLB and forcing all TLB entries to be recreated
	 * from their page table flags.
	 */
	flush_tlb_all();
}

/* References to section boundaries */

void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

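	/* high_memory marks the top of the directly mapped low memory;
	 * everything below it is reachable through __va()/__pa().
	 */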
	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	printk("mem_init_done ...........................................\n");
	mem_init_done = 1;
}

static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

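	/*
	 * With a two-level layout the p4d/pud/pmd offset helpers fold
	 * back onto the pgd entry; pte_alloc_kernel() allocates the PTE
	 * page on first use.
	 */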
	p4d = p4d_offset(pgd_offset_k(va), va);
	pud = pud_offset(p4d, va);
	pmd = pmd_offset(pud, va);
	pte = pte_alloc_kernel(pmd, va);

	if (pte == NULL)
		return -ENOMEM;

	if (pgprot_val(prot))
		set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, va, pte);

	local_flush_tlb_page(NULL, va);

	return 0;
}

void __init __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	map_page(address, phys, prot);
}

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY_X,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY_X,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT
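/*
 * DECLARE_VM_GET_PAGE_PROT expands to the generic vm_get_page_prot()
 * helper, roughly:
 *
 *	pgprot_t vm_get_page_prot(unsigned long vm_flags)
 *	{
 *		return protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *	}
 *	EXPORT_SYMBOL(vm_get_page_prot);
 *
 * i.e. the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a VMA's flags
 * index the table above; private writable mappings get the PAGE_COPY*
 * (copy-on-write) protections rather than PAGE_SHARED*.
 */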