Linux 6.13-rc7
arch/openrisc/mm/init.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
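
/*
 * Set once mem_init() below has finished; early-boot code can test this
 * flag to tell whether the core mm is ready for use.
 */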
int mem_init_done;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
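	/* OpenRISC has no highmem: every page memblock knows about lies
	 * below max_low_pfn, so all of RAM lands in ZONE_NORMAL.
	 */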

	free_area_init(max_zone_pfn);
}

extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	phys_addr_t start, end;
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	u64 i;
	/* These mark extents of read-only kernel pages...
	 * ...from vmlinux.lds.S
	 */

	v = PAGE_OFFSET;

	for_each_mem_range(i, &start, &end) {
		p = (u32) start & PAGE_MASK;
		e = (u32) end;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			p4e = p4d_offset(pge, v);
			pue = pud_offset(p4e, v);
			pme = pmd_offset(pue, v);
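
			/* With two-level tables the p4d/pud/pmd levels are
			 * folded away, so the three lookups above must all
			 * hand back the pgd entry itself; anything else means
			 * the page-table layout no longer matches this code.
			 */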
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for "
				      "two-level page tables",
				      __func__);
			}

			/* Alloc one page for holding PTEs... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
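			/* Link the new PTE page into the pmd: _KERNPG_TABLE
			 * supplies the kernel page-table flags and __pa(pte)
			 * the physical address of the freshly allocated page.
			 */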
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
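				/* Pages inside [_s_kernel_ro, _e_kernel_ro)
				 * hold the kernel's read-only section and are
				 * mapped PAGE_KERNEL_RO; everything else gets
				 * an ordinary read-write kernel mapping.
				 */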
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       start, end);
	}
}

void __init paging_init(void)
{
	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 *  switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);
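
		/* Each vector is patched with a single OpenRISC l.j
		 * instruction: l.j has opcode 0x0 in its top six bits, so
		 * storing just the PC-relative word offset (byte distance
		 * >> 2) forms a valid jump, assuming the handler lies within
		 * l.j's forward 26-bit range. E.g. a handler 0x2000 bytes
		 * past its vector stores 0x2000 >> 2 = 0x800.
		 */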
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);
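
	/* Writing an address to SPR_ICBIR invalidates the instruction-cache
	 * block containing it, so the patched vectors at 0x900 and 0xa00 are
	 * refetched from memory instead of stale cache lines.
	 */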

	/* The new TLB miss handlers and kernel page tables are now in place.
	 * Make sure that page flags get updated for all pages in the TLB by
	 * flushing the TLB and forcing all TLB entries to be recreated
	 * from their page table flags.
	 */
	flush_tlb_all();
}

/* References to section boundaries */

void __init mem_init(void)
{
	BUG_ON(!mem_map);
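
	/* Everything below max_low_pfn is directly mapped lowmem;
	 * high_memory marks the first address past that region.
	 */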
	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	printk("mem_init_done ...........................................\n");
	mem_init_done = 1;
}

static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	p4d = p4d_offset(pgd_offset_k(va), va);
	pud = pud_offset(p4d, va);
	pmd = pmd_offset(pud, va);
	pte = pte_alloc_kernel(pmd, va);

	if (pte == NULL)
		return -ENOMEM;
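
	/* An empty pgprot means "unmap": clear any existing PTE rather
	 * than installing a new translation.
	 */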
	if (pgprot_val(prot))
		set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, va, pte);

	local_flush_tlb_page(NULL, va);
	return 0;
}

void __init __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	map_page(address, phys, prot);
}
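
/*
 * Translate vma->vm_flags combinations (VM_READ | VM_WRITE | VM_EXEC |
 * VM_SHARED) into page protections. Private writable mappings get PAGE_COPY
 * rather than a writable protection: withholding the write bit makes the
 * first store fault so it can be handled as copy-on-write.
 */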
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY_X,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY_X,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT
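
/*
 * DECLARE_VM_GET_PAGE_PROT (from linux/mm.h) expands to the generic
 * vm_get_page_prot() implementation, which indexes protection_map[]
 * above with the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED vm_flags bits.
 */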