/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MOTOROLA_PGTABLE_H
#define _MOTOROLA_PGTABLE_H

/*
 * Definitions for MMU descriptors
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_SHORT	0x002
#define _PAGE_RONLY	0x004
#define _PAGE_READWRITE	0x000
#define _PAGE_ACCESSED	0x008
#define _PAGE_DIRTY	0x010
#define _PAGE_SUPER	0x080	/* 68040 supervisor only */
#define _PAGE_GLOBAL040	0x400	/* 68040 global bit, used for kva descs */
#define _PAGE_NOCACHE030 0x040	/* 68030 no-cache mode */
#define _PAGE_NOCACHE	0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_NOCACHE_S	0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_CACHE040	0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_CACHE040W	0x000	/* 68040 cache mode, cachable, write-through */

#define _DESCTYPE_MASK	0x003

#define _CACHEMASK040	(~0x060)
#define _TABLE_MASK	(0xfffffe00)

#define _PAGE_TABLE	(_PAGE_SHORT)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)

#define _PAGE_PROTNONE	0x004
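
/*
 * Note that _PAGE_PROTNONE reuses the read-only bit (0x004): a PROT_NONE
 * pte keeps this bit set with _PAGE_PRESENT clear, so the MMU treats the
 * descriptor as invalid while pte_present() below still reports it.
 */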

#ifndef __ASSEMBLY__

/* This is the cache mode to be used for pages containing page descriptors for
 * processors >= '040. It is used in pte_mknocache(), and the variable is
 * defined and initialized in head.S */
extern int m68k_pgtable_cachemode;

/* This is the cache mode for normal pages, for supervisor access on
 * processors >= '040. It is used in pte_mkcache(), and the variable is
 * defined and initialized in head.S */

#if defined(CPU_M68060_ONLY) && defined(CONFIG_060_WRITETHROUGH)
#define m68k_supervisor_cachemode _PAGE_CACHE040W
#elif defined(CPU_M68040_OR_M68060_ONLY)
#define m68k_supervisor_cachemode _PAGE_CACHE040
#elif defined(CPU_M68020_OR_M68030_ONLY)
#define m68k_supervisor_cachemode 0
#else
extern int m68k_supervisor_cachemode;
#endif

#if defined(CPU_M68040_OR_M68060_ONLY)
#define mm_cachebits _PAGE_CACHE040
#elif defined(CPU_M68020_OR_M68030_ONLY)
#define mm_cachebits 0
#else
extern unsigned long mm_cachebits;
#endif

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)

/* Alternate definitions that are compile time constants, for
   initializing protection_map. The cachebits are fixed later. */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

/*
 * The m68k can't do page protection for execute; it treats execute
 * permission the same as read permission. Also, write permissions imply
 * read permissions. This is the closest we can get.
 */
#define __P000	PAGE_NONE_C
#define __P001	PAGE_READONLY_C
#define __P010	PAGE_COPY_C
#define __P011	PAGE_COPY_C
#define __P100	PAGE_READONLY_C
#define __P101	PAGE_READONLY_C
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE_C
#define __S001	PAGE_READONLY_C
#define __S010	PAGE_SHARED_C
#define __S011	PAGE_SHARED_C
#define __S100	PAGE_READONLY_C
#define __S101	PAGE_READONLY_C
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C
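
/*
 * Private writable mappings (__P010/__P011/__P110/__P111) deliberately map
 * to PAGE_COPY_C: the pte starts out read-only, and the first write faults
 * so the kernel can do copy-on-write.
 */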

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
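
/*
 * pte_modify() keeps everything covered by _PAGE_CHG_MASK - the page frame
 * number plus the accessed, dirty and cache-mode bits - and replaces only
 * the protection bits with those from the new pgprot.
 */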

static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
	unsigned long *ptr = pmdp->pmd;
	short i = 16;
	while (--i >= 0) {
		*ptr++ = ptbl;
		ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
	}
}
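
/*
 * A pmd entry holds 16 pointer-table descriptors, so pmd_set() replicates
 * the page-table pointer into all 16 slots, advancing the physical address
 * by one sixteenth of the pte page each time.
 */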

static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
{
	pud_val(*pudp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
}

#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,ptep)	({ pte_val(*(ptep)) = 0; })

#define pte_page(pte)		virt_to_page(__va(pte_val(pte)))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
#define pmd_clear(pmdp) ({			\
	unsigned long *__ptr = pmdp->pmd;	\
	short __i = 16;				\
	while (--__i >= 0)			\
		*__ptr++ = 0;			\
})
#define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		((pud_val(pud) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pud_present(pud)	(pud_val(pud) & _PAGE_TABLE)
#define pud_clear(pudp)		({ pud_val(*pudp) = 0; })
#define pud_page(pud)		(mem_map + ((unsigned long)(__va(pud_val(pud)) - PAGE_OFFSET) >> PAGE_SHIFT))

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_RONLY); }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_RONLY; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RONLY; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
	return pte;
}
static inline pte_t pte_mkcache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
	return pte;
}
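
/*
 * Both helpers above mask out the two '040 cache-mode bits (_CACHEMASK040
 * clears 0x060) before inserting the mode that head.S selected for the
 * detected CPU, per the m68k_*_cachemode comments near the top.
 */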
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)
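
/*
 * No mask is needed in pgd_index(): the root table covers the whole 32-bit
 * address space with 128 entries (see kernel_pg_dir[] below), so the shift
 * alone can never produce an index of 128 or more.
 */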

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm,
				unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];

static inline pgd_t *pgd_offset_k(unsigned long address)
{
	return kernel_pg_dir + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
{
	return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_unmap(pte)		((void)0)

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		p4d_t *p4dp;
		pud_t *pudp;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(addr);
		p4dp = p4d_offset(dir, addr);
		pudp = pud_offset(p4dp, addr);
		pmdp = pmd_offset(pudp, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		p4d_t *p4dp;
		pud_t *pudp;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(addr);
		p4dp = p4d_offset(dir, addr);
		pudp = pud_offset(p4dp, addr);
		pmdp = pmd_offset(pudp, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mkcache(*ptep);
	}
}
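
/*
 * A minimal sketch of the call sequence the -jskov comment above expects,
 * for a freshly allocated page being turned into a page-table page (an
 * illustration only; helper names as used elsewhere in m68k code):
 *
 *	__flush_page_to_ram(pte);	push dirty cache lines to RAM
 *	flush_tlb_kernel_page(pte);	drop the stale ATC entry
 *	nocache_page(pte);		now safe to switch the cache mode
 */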

/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
#define __swp_type(x)		(((x).val >> 4) & 0xff)
#define __swp_offset(x)		((x).val >> 12)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 12) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
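
/*
 * Resulting swap entry layout in the pte, derived from the shifts above:
 * bits 0-3 stay clear, so pte_present() is false (and a non-zero type or
 * offset keeps the entry from being pte_none()); bits 4-11 hold the swap
 * type; bits 12-31 hold the swap offset.
 */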

#endif	/* !__ASSEMBLY__ */
#endif	/* _MOTOROLA_PGTABLE_H */