// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c":
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>
extern char etext[], _stext[], _sinittext[], _einittext[];

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;
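
/*
 * Populate the fixmap region early so that early_ioremap() can be used
 * before the normal page table allocators are up: every PMD entry
 * covering FIXADDR_START..FIXADDR_TOP is pointed at a PTE page carved
 * out of the statically allocated early_fixmap_pagetable above.
 */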
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}
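
/*
 * Boot-time page table allocator: grabs memory straight from memblock
 * while the slab allocator is not yet available.  Passing @size as the
 * alignment too keeps each fragment naturally aligned.
 */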
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}
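
/*
 * Boot-time counterpart of pte_alloc_kernel(): populate an empty PMD
 * entry with a freshly allocated PTE fragment, then return the PTE
 * slot corresponding to @va.
 */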
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}
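
/*
 * Establish a single-page kernel mapping of @va to @pa with protection
 * @prot, allocating an intermediate PTE page if necessary.  Returns 0
 * on success or -ENOMEM if no PTE page could be allocated.
 */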
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}
/*
 * Map in a chunk of physical memory starting at start.
 */
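
/*
 * Pages backing kernel text (and init text) are mapped with
 * PAGE_KERNEL_TEXT so they remain executable; everything else in the
 * chunk gets plain PAGE_KERNEL.
 */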
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
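
/*
 * Create the linear mapping for all of lowmem.  Each memblock range is
 * clipped to total_lowmem, and mmu_mapin_ram() gets first shot at
 * covering the range with block mappings (e.g. BATs); it returns the
 * address from which page-by-page mapping has to take over.
 */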
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}
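
/*
 * Rewrite the PTE of a single lowmem page with protection @prot.
 * Block-mapped addresses are left untouched, and the caller is
 * responsible for flushing the TLB.
 */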
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	kpte = virt_to_kpte(address);
	if (!kpte)
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);

	return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
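
/*
 * The PTE rewrites are done non-atomically with IRQs disabled, and the
 * TLB is flushed once for the whole range at the end.
 */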
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}
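
/*
 * Remap the init text with PAGE_KERNEL so that it is no longer
 * executable once it has been freed.  Block-mapped kernels delegate to
 * the MMU-specific mmu_mark_initmem_nx() instead.
 */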
void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
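/*
 * Enforce the kernel's read-only mappings: text becomes read-only
 * executable (ROX), and everything from __start_rodata up to
 * __init_begin becomes read-only (RO).  Block-mapped kernels delegate
 * to mmu_mark_rodata_ro().  ptdump_check_wx() then reports any page
 * left both writable and executable.
 */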
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
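/*
 * DEBUG_PAGEALLOC hook: map pages with PAGE_KERNEL when they are
 * allocated and unmap them (protection 0) when they are freed, so that
 * stray accesses to free pages fault.  Highmem pages are not part of
 * the linear mapping and are skipped.
 */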
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */