/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h:  Defines and functions used to work
 *                 with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* xwr (see the indexing sketch after these tables) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
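
/*
 * Illustrative sketch (not part of this header): the generic mm code
 * builds a 16-entry protection_map[] from the tables above, indexed by
 * the shared bit plus the x, w, r permission bits.  Names prefixed with
 * "example_" are hypothetical.
 */
#if 0
static const pgprot_t example_protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
/* e.g. a shared PROT_READ|PROT_WRITE mapping indexes entry 8+2+1 = 11,
 * i.e. __S011, which is PAGE_SHARED on sparc32. */
#endif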
/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
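
/*
 * Illustrative sketch (assumption, not compiled): why set_pte() below
 * goes through srmmu_swap() rather than a plain store.  The SRMMU table
 * walker updates ref/mod bits in the pte word in memory; a load/store
 * sequence on the CPU side could silently lose such an update.
 */
#if 0
static inline void broken_set_pte(pte_t *ptep, pte_t pteval)
{
	/* DON'T: a hardware ref/mod update landing between the MMU's
	 * write and this store retiring would be overwritten. */
	*(unsigned long *)ptep = pte_val(pteval);
}
#endif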
/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return ~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)
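
/*
 * Illustrative sketch (assumption, not compiled): how the accessors and
 * modifiers above typically pair up.  "mm", "addr", "ptep" and "entry"
 * are hypothetical locals of a caller.
 */
#if 0
	pte_t entry = *ptep;

	/* Write-protect a present, writable pte, e.g. for copy-on-write: */
	if (pte_present(entry) && pte_write(entry))
		set_pte_at(mm, addr, ptep, pte_wrprotect(entry));

	/* Mark a pte referenced and dirty after a software fault: */
	set_pte_at(mm, addr, ptep, pte_mkdirty(pte_mkyoung(entry)));
#endif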
#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() copy the PTE over directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t * dir, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)
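
/*
 * Illustrative sketch (assumption, not compiled): walking from a kernel
 * virtual address down to its pte with the helpers above.  The p4d/pud
 * steps are the no-op folds supplied by the generic pgtable-nop4d/nopud
 * headers; "example_kernel_pte" is a hypothetical name.
 */
#if 0
static pte_t *example_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	p4d_t *p4d = p4d_offset(pgd, address);	/* folded level */
	pud_t *pud = pud_offset(p4d, address);	/* folded level */
	pmd_t *pmd = pmd_offset(pud, address);

	return pte_offset_kernel(pmd, address);
}
#endif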
struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
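
/*
 * Illustrative sketch (not compiled): a swap entry round-trip using only
 * the helpers above; "type" and "offset" are hypothetical values.
 */
#if 0
	unsigned long type = 1, offset = 42;
	swp_entry_t ent = __swp_entry(type, offset);
	pte_t pte = __swp_entry_to_pte(ent);

	/* Recovers the packed fields: */
	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != offset);
#endif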
static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
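
/*
 * Illustrative sketch (not compiled): how the iospace nibble is packed
 * into and recovered from a pfn on a 32-bit long (BITS_PER_LONG == 32).
 */
#if 0
	unsigned long pfn = MK_IOSPACE_PFN(0x2UL, 0x1234UL);	/* 0x20001234 */

	/* GET_IOSPACE(pfn) == 0x2, GET_PFN(pfn) == 0x1234 */
#endif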
int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* !(_SPARC_PGTABLE_H) */