arch/powerpc/mm/pgtable_32.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}
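
/*
 * Note (editorial addition, not in the original source): memblock_alloc()
 * takes (size, align), so passing the size as the alignment here means an
 * allocation of PTE_FRAG_SIZE comes back naturally aligned to
 * PTE_FRAG_SIZE, which is what the early PTE table setup below relies on.
 */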
static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/*
		 * The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}
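
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * with 4K pages and the two-level 32-bit layout the comments above refer
 * to, a virtual address decomposes as
 *
 *	pgd index = va >> 22;		  upper 10 bits, 1024 first-level entries
 *	pte index = (va >> 12) & 0x3ff;	  middle 10 bits, 1024 PTEs per table
 *	offset    = va & 0xfff;		  low 12 bits within the 4K page
 *
 * e.g. va = 0xc0001000 gives pgd index 0x300, pte index 0x001, offset 0.
 */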

/*
 * Map in a chunk of physical memory starting at offset.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		/* Kernel text and init text get executable protections */
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
void __init mapin_ram(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}
/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return retval;
}
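
/*
 * Hypothetical usage sketch (editorial addition, not in the tree): a caller
 * must pair a successful lookup with the pte_unmap() the XXX note above
 * asks for. The helper name is illustrative only.
 */
static int __maybe_unused read_kernel_pte(unsigned long addr, pte_t *val)
{
	pte_t *pte;

	if (!get_pteptr(&init_mm, addr, &pte, NULL))
		return -EINVAL;		/* no PTE mapped at addr */
	*val = *pte;			/* snapshot the entry */
	pte_unmap(pte);			/* undo pte_offset_map()'s mapping */
	return 0;
}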
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}
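
/*
 * Hypothetical usage sketch (editorial addition): write-protecting a single
 * low-memory page `pg` in the linear mapping would look like
 *
 *	change_page_attr(pg, 1, PAGE_KERNEL_RO);
 *
 * mark_initmem_nx() and mark_rodata_ro() below are the real in-tree callers.
 */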
void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_stext + 1))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}
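
/*
 * Worked example (editorial addition): PFN_UP(end) - PFN_DOWN(start) rounds
 * the range out to whole pages. With 4K pages, a made-up _sinittext of
 * 0xc0500004 and _einittext of 0xc0502004 would give PFN_DOWN = 0xc0500 and
 * PFN_UP = 0xc0503, i.e. numpages = 3.
 */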

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_sinittext)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
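
/*
 * Note (editorial addition): under CONFIG_DEBUG_PAGEALLOC the core page
 * allocator calls __kernel_map_pages() with enable=0 as pages are freed and
 * enable=1 as they are allocated; passing __pgprot(0) clears the PTE so a
 * use-after-free through the linear mapping faults immediately.
 */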