arch/openrisc/mm/init.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h>	/* for initrd_* */
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
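/*
 * Set to 1 at the end of mem_init(); early mapping code (the OpenRISC
 * ioremap() path, for instance) can test it to tell whether the normal
 * page allocators are usable yet.  Which callers actually check it is
 * an assumption drawn from the rest of the arch code, not this file.
 */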
int mem_init_done;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];

        /* Clear the zone sizes */
        memset(zones_size, 0, sizeof(zones_size));

        /*
         * We use only ZONE_NORMAL
         */
        zones_size[ZONE_NORMAL] = max_low_pfn;

        free_area_init(zones_size);
}
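/*
 * A note on the call above: in this kernel generation free_area_init()
 * takes an array of per-zone sizes (in pages), so putting max_low_pfn
 * into ZONE_NORMAL hands all low memory to that single zone.  Later
 * kernels changed the argument to a max_zone_pfn[] array; that detail
 * is an assumption about surrounding kernel versions, not something
 * visible in this file.
 */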
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
        unsigned long v, p, e;
        pgprot_t prot;
        pgd_t *pge;
        pud_t *pue;
        pmd_t *pme;
        pte_t *pte;
        /* These mark extents of read-only kernel pages...
         * ...from vmlinux.lds.S
         */
        struct memblock_region *region;

        v = PAGE_OFFSET;

        for_each_memblock(memory, region) {
                p = (u32) region->base & PAGE_MASK;
                e = p + (u32) region->size;

                v = (u32) __va(p);
                pge = pgd_offset_k(v);
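
                /*
                 * pgd_offset_k() picks the kernel (init_mm) pgd slot that
                 * covers this virtual address; the loop below then fills in
                 * the lower levels one PTE page at a time.
                 */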
                while (p < e) {
                        int j;
                        pue = pud_offset(pge, v);
                        pme = pmd_offset(pue, v);

                        if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
                                panic("%s: OR1K kernel hardcoded for "
                                      "two-level page tables",
                                      __func__);
                        }

                        /* Alloc one page for holding PTE's... */
                        pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
                        if (!pte)
                                panic("%s: Failed to allocate page for PTEs\n",
                                      __func__);
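                        /*
                         * Hook the fresh PTE page into the pmd entry:
                         * __pa() gives its physical address and
                         * _KERNPG_TABLE supplies the kernel page-table
                         * flags (as defined in the OpenRISC pgtable
                         * headers; the flag details are not spelled out
                         * here).
                         */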
                        set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

                        /* Fill the newly allocated page with PTE'S */
                        for (j = 0; p < e && j < PTRS_PER_PTE;
                             v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
                                if (v >= (u32) _e_kernel_ro ||
                                    v < (u32) _s_kernel_ro)
                                        prot = PAGE_KERNEL;
                                else
                                        prot = PAGE_KERNEL_RO;

                                set_pte(pte, mk_pte_phys(p, prot));
                        }

                        pge++;
                }

                printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
                       region->base, region->base + region->size);
        }
}
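
/*
 * After map_ram() all memblock memory is linearly mapped: pages inside
 * [_s_kernel_ro, _e_kernel_ro) get PAGE_KERNEL_RO, everything else
 * PAGE_KERNEL (read-write).
 */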
void __init paging_init(void)
{
        extern void tlb_init(void);

        unsigned long end;
        int i;

        printk(KERN_INFO "Setting up paging and PTEs.\n");

        /* clear out the init_mm.pgd that will contain the kernel's mappings */

        for (i = 0; i < PTRS_PER_PGD; i++)
                swapper_pg_dir[i] = __pgd(0);

        /* make sure the current pgd table points to something sane
         * (even if it is most probably not used until the next
         *  switch_mm)
         */
        current_pgd[smp_processor_id()] = init_mm.pgd;

        end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);

        map_ram();

        zone_sizes_init();

        /* self modifying code ;) */
        /* Since the old TLB miss handler has been running up until now,
         * the kernel pages are still all RW, so we can still modify the
         * text directly... after this change and a TLB flush, the kernel
         * pages will become RO.
         */
        {
                extern unsigned long dtlb_miss_handler;
                extern unsigned long itlb_miss_handler;

                unsigned long *dtlb_vector = __va(0x900);
                unsigned long *itlb_vector = __va(0xa00);
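
                /*
                 * The first word of each exception vector (0x900 = DTLB
                 * miss, 0xa00 = ITLB miss) is rewritten to branch to the
                 * final handler.  This works assuming the OR1K l.j
                 * instruction, whose major opcode is 0: a word holding
                 * only the signed word offset ((handler - vector) >> 2)
                 * decodes as "l.j handler", provided the offset fits in
                 * 26 bits.
                 */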
168 printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
169 *itlb_vector = ((unsigned long)&itlb_miss_handler -
170 (unsigned long)itlb_vector) >> 2;
172 /* Soft ordering constraint to ensure that dtlb_vector is
173 * the last thing updated
175 barrier();
177 printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
178 *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
179 (unsigned long)dtlb_vector) >> 2;

        /* Soft ordering constraint to ensure that cache invalidation and
         * TLB flush really happen _after_ code has been modified.
         */
        barrier();

        /* Invalidate instruction caches after code modification */
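        /*
         * SPR_ICBIR is the instruction-cache block invalidate register
         * (per the OpenRISC 1000 SPR set): writing an address evicts the
         * I-cache line holding it, so the CPU refetches the patched
         * vectors from memory.
         */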
        mtspr(SPR_ICBIR, 0x900);
        mtspr(SPR_ICBIR, 0xa00);

        /* New TLB miss handlers and kernel page tables are now in place.
         * Make sure that page flags get updated for all pages in TLB by
         * flushing the TLB and forcing all TLB entries to be recreated
         * from their page table flags.
         */
        flush_tlb_all();
}

/* References to section boundaries */

void __init mem_init(void)
{
        BUG_ON(!mem_map);

        max_mapnr = max_low_pfn;
        high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
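        /*
         * max_mapnr bounds mem_map/pfn_valid() in this flat-memory
         * configuration and high_memory is the first virtual address
         * above the linearly mapped low memory; how generic code uses
         * them is stated from general kernel knowledge, not this file.
         */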
209 /* clear the zero-page */
210 memset((void *)empty_zero_page, 0, PAGE_SIZE);
212 /* this will put all low memory onto the freelists */
213 memblock_free_all();
215 mem_init_print_info(NULL);
217 printk("mem_init_done ...........................................\n");
218 mem_init_done = 1;
219 return;
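
/*
 * Rough boot ordering, for orientation: paging_init() runs from the
 * architecture's setup_arch() while the boot TLB handlers are still
 * active, and mem_init() is called later from generic init code, after
 * which mem_init_done marks the MM as fully usable.  The exact call
 * sites are assumed from the usual kernel layout rather than shown in
 * this file.
 */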