/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL
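
/*
 * Worked example (assuming the default 8 KB page size, PAGE_SHIFT = 13):
 * each level holds 1UL << (13-3) = 1024 eight-byte entries, so
 *
 *	a pte maps		8 KB  (one page)
 *	a pmd entry maps	8 MB  (PMD_SHIFT = 23)
 *	a pgd entry maps	8 GB  (PGDIR_SHIFT = 33)
 *	a full pgd maps		8 TB  of virtual address space.
 */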

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END		(-PGDIR_SIZE)

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
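
/*
 * Illustrative sketch of the protocol described above: pte_mkold()
 * (below) clears __ACCESS_BITS, so the next read faults and the fault
 * handler can call pte_mkyoung() to record the reference; pte_mkclean()
 * clears __DIRTY_BITS, so the next write faults and pte_mkdirty() sets
 * _PAGE_DIRTY together with KWE/UWE again.
 */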

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c).
 */

#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)
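
/*
 * Illustrative expansion: the private "-w-" entry __P010 is
 * _PAGE_P(_PAGE_FOE).  _PAGE_P() ORs in _PAGE_FOW since it was absent,
 * giving _PAGE_VALID | __ACCESS_BITS | _PAGE_FOE | _PAGE_FOW: reads are
 * allowed (the write-implies-read rule above), execution faults, and
 * the first write faults so the copy-on-write logic can run.
 */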

/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2	3

/* to find an entry in a page-table */
#define PAGE_PTR(address)		\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif
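
/*
 * Worked example (illustrative, 8 KB pages): a pfn whose physical
 * address has bits 42-43 equal to binary 01 (the 0x40000000000 case)
 * matches the test above; XOR-ing with KSEG_PFN flips bits 42-43 to
 * binary 10, restoring the high-order bits the truncated KSEG address
 * lost.  Every other pfn passes through PHYS_TWIDDLE() unchanged.
 */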

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define page_to_pa(page)	(((page) - mem_map) << PAGE_SHIFT)

#define pte_pfn(pte)	(pte_val(pte) >> 32)
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
	pte;								\
})
#endif

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
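
/*
 * Hypothetical usage sketch (illustrative only, not a definition added
 * by this header): installing a read-only mapping for a struct page.
 *
 *	pte_t pte = mk_pte(page, PAGE_READONLY);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * pfn_pte() builds the same entry from a raw pfn, additionally passing
 * it through PHYS_TWIDDLE() for the KSEG-overlap workaround above.
 */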

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
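
/*
 * Encoding note (illustrative): the table's virtual address is reduced
 * to a pfn, (vaddr - PAGE_OFFSET) >> PAGE_SHIFT, and parked in bits
 * 32..63 of the entry -- that is what the combined << (32-PAGE_SHIFT)
 * shift does -- then tagged with _PAGE_TABLE.  pmd_page_vaddr() and
 * pgd_page_vaddr() below perform the inverse transformation.
 */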

extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
#define pgd_page(pgd)	(mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
#endif

extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_special(pte_t pte)	{ return 0; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
extern inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
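
/*
 * Hypothetical usage sketch: after a successful write fault the generic
 * code effectively does
 *
 *	pte = pte_mkdirty(pte_mkwrite(pte));
 *
 * clearing _PAGE_FOW and setting __DIRTY_BITS, so further writes no
 * longer fault.
 */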

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/*
 * The smp_read_barrier_depends() in the following functions are required to
 * order the load of *dir (the pointer in the top level page table) with any
 * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * documentation.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)			do { } while (0)
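
/*
 * Hypothetical walk of all three levels for a kernel address (a sketch
 * only; real callers must also hold the appropriate page-table locks):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step extracts the per-level index bits defined at the top of
 * this file and relies on the barriers discussed above.
 */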

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t *ptep)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
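
/*
 * Worked example (illustrative): __swp_entry(2, 0x1234) produces the
 * value (2UL << 32) | (0x1234UL << 40); __swp_type() masks back out 2
 * and __swp_offset() recovers 0x1234.  The low 32 bits, including
 * _PAGE_VALID, remain clear, so a swap pte is never pte_present().
 */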

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */