// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
{
        unsigned long i;

        for (i = 0; i < size; i += PAGE_SIZE) {
                int err = map_kernel_page(ea + i, pa + i, prot);

                if (err) {
                        if (slab_is_available())
                                unmap_kernel_range(ea, size);
                        else
                                WARN_ON_ONCE(1); /* Should clean up */
                        return err;
                }
        }

        return 0;
}
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 *
 * (An illustrative usage sketch follows the function body.)
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
        /* We don't support the 4K PFN hack with ioremap */
        if (pgprot_val(prot) & H_PAGE_4K_PFN)
                return NULL;

        if ((ea + size) >= (void *)IOREMAP_END) {
                pr_warn("Outside the supported range\n");
                return NULL;
        }

        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
                return NULL;

        return (void __iomem *)ea;
}
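/*
 * Illustrative usage sketch (not part of the original file; the phb_io_*
 * names are hypothetical): a platform setup path that wants an IO window
 * bolted at a virtual address it chose itself, rather than one handed out
 * by the vmalloc allocator, could do:
 *
 *      void __iomem *io;
 *
 *      io = __ioremap_at(phb_io_phys, (void *)phb_io_virt, phb_io_size,
 *                        pgprot_noncached(PAGE_KERNEL));
 *      if (!io)
 *              return -ENOMEM;
 *
 * All three of physical base, virtual base and size must be page aligned.
 */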
/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 *
 * (An illustrative usage sketch follows the function body.)
 */
void __iounmap_at(void *ea, unsigned long size)
{
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        unmap_kernel_range((unsigned long)ea, size);
}
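/*
 * Illustrative counterpart sketch (hypothetical names again): releasing only
 * part of a window that was established with __ioremap_at() above, e.g. when
 * a slice of a PCI IO range goes away:
 *
 *      __iounmap_at((void *)(phb_io_virt + chunk_offset), chunk_size);
 *
 * Both the address and the size must remain page aligned, as the WARN_ONs
 * above check.
 */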
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
                                pgprot_t prot, void *caller)
{
        phys_addr_t paligned;
        void __iomem *ret;

        /*
         * Choose an address to map it to.
         * Once the imalloc system is running, we use it.
         * Before that, we map using addresses going
         * up from ioremap_bot.  imalloc will use
         * the addresses from ioremap_bot through
         * IMALLOC_END
         */
        paligned = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - paligned;

        if ((size == 0) || (paligned == 0))
                return NULL;

        if (slab_is_available()) {
                struct vm_struct *area;

                area = __get_vm_area_caller(size, VM_IOREMAP,
                                            ioremap_bot, IOREMAP_END,
                                            caller);
                if (area == NULL)
                        return NULL;

                area->phys_addr = paligned;
                ret = __ioremap_at(paligned, area->addr, size, prot);
        } else {
                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
                if (ret)
                        ioremap_bot += size;
        }

        if (ret)
                ret += addr & ~PAGE_MASK;
        return ret;
}
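/*
 * Worked example of the alignment handling above (hypothetical values):
 * for addr = 0xfe000004 and size = 0x10, paligned becomes 0xfe000000 and
 * size is rounded up to one full page; the in-page offset (0x4) is added
 * back onto the returned cookie, so the caller dereferences exactly the
 * address it asked for while the mapping itself stays page granular.
 */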
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                         unsigned long flags)
{
        return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_cached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}
void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
                            unsigned long flags)
{
        pte_t pte = __pte(flags);
        void *caller = __builtin_return_address(0);

        /* writeable implies dirty for kernel addresses */
        if (pte_write(pte))
                pte = pte_mkdirty(pte);

        /* we don't want to let _PAGE_EXEC leak out */
        pte = pte_exprotect(pte);
        /*
         * Force kernel mapping.
         */
        pte = pte_mkprivileged(pte);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
        return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
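/*
 * Illustrative sketch (hypothetical resource variables): ioremap_prot() lets
 * a caller hand in raw PTE flags, e.g. to ask for a cacheable mapping:
 *
 *      void __iomem *p;
 *
 *      p = ioremap_prot(res_start, res_size,
 *                       pgprot_val(pgprot_cached(PAGE_KERNEL)));
 *
 * Whatever flags are supplied, the sanitizing above forces a privileged,
 * non-executable mapping before the page tables are touched.
 */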
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!slab_is_available())
                return;

        addr = (void *) ((unsigned long __force)
                         PCI_FIX_ADDR(token) & PAGE_MASK);
        if ((unsigned long)addr < ioremap_bot) {
                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
                       " at 0x%p\n", addr);
                return;
        }
        vunmap(addr);
}
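/*
 * Illustrative driver-side sketch (hypothetical device values): the usual
 * pairing is ioremap()/iounmap(); iounmap() below funnels into __iounmap()
 * unless the platform overrides ppc_md.iounmap:
 *
 *      void __iomem *regs = ioremap(dev_phys, dev_size);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      ... readl()/writel() against regs ...
 *      iounmap(regs);
 */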
void iounmap(volatile void __iomem *token)
{
        if (ppc_md.iounmap)
                ppc_md.iounmap(token);
        else
                __iounmap(token);
}
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
        if (pgd_is_leaf(pgd)) {
                VM_WARN_ON(!pgd_huge(pgd));
                return pte_page(pgd_pte(pgd));
        }
        return virt_to_page(pgd_page_vaddr(pgd));
}
#endif
struct page *pud_page(pud_t pud)
{
        if (pud_is_leaf(pud)) {
                VM_WARN_ON(!pud_huge(pud));
                return pte_page(pud_pte(pud));
        }
        return virt_to_page(pud_page_vaddr(pud));
}
/*
 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_is_leaf(pmd)) {
                VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
                return pte_page(pmd_pte(pmd));
        }
        return virt_to_page(pmd_page_vaddr(pmd));
}
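/*
 * Illustrative sketch (hypothetical walker code): because pmd_page() handles
 * both leaf (huge) and table entries, a page table walk can use it uniformly
 * to reach the backing struct page:
 *
 *      struct page *page;
 *      pmd_t pmd = READ_ONCE(*pmdp);
 *
 *      if (pmd_present(pmd))
 *              page = pmd_page(pmd);
 */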
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
        if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
                pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
                return;
        }

        if (radix_enabled())
                radix__mark_rodata_ro();
        else
                hash__mark_rodata_ro();

        // mark_initmem_nx() should have already run by now
        ptdump_check_wx();
}
void mark_initmem_nx(void)
{
        if (radix_enabled())
                radix__mark_initmem_nx();
        else
                hash__mark_initmem_nx();
}
#endif