/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
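
/*
 * Illustrative usage (a sketch, not taken from this file): these pgprot
 * values are consumed by the platform mapping helpers, e.g. installing a
 * single read-only kernel page could look like
 *
 *	map_kernel_page(vaddr, paddr, PAGE_KERNEL_RO);
 *
 * assuming the usual powerpc map_kernel_page() helper.
 */
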
/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write-protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set it RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
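
/*
 * Illustrative sketch (not from this file): these accessors are plain
 * bit tests, so a walker that skips empty slots might read
 *
 *	if (pte_none(*ptep))
 *		continue;	(nothing mapped at this slot)
 */
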
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h . On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */
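
/*
 * Illustrative sketch (hypothetical caller): given the definitions
 * above, a NUMA hinting fault candidate is a PTE that is present but
 * not user-accessible, so the fault path can test
 *
 *	if (pte_protnone(pte))
 *		return handle_numa_hinting_fault();
 *
 * where handle_numa_hinting_fault() stands in for the real handler.
 */
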
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif
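
/*
 * Illustrative consequence of the exact-match test above (sketch): a
 * kernel-executable mapping on book3e carries _PAGE_BAP_SR, which
 * overlaps _PAGE_USER, yet
 *
 *	pte_user(pfn_pte(pfn, PAGE_KERNEL_X))	evaluates to false
 *
 * because not all of the _PAGE_USER bits are set.
 */
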
/*
 * We only find the page table entry at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit.
	 * We have _PAGE_READ set for WRITE and EXECUTE.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
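
/*
 * Illustrative usage (sketch of a gup_fast()-style caller, not from
 * this file):
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return 0;	(fall back to the slow path)
 */
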
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
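
/*
 * Illustrative round trip (sketch): pfn_pte() places the PFN above
 * PTE_RPN_SHIFT and pte_pfn() shifts it back out, so for any valid pfn
 *
 *	pte_pfn(pfn_pte(pfn, PAGE_KERNEL)) == pfn
 */
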
/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
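
/*
 * Illustrative usage (sketch): an mprotect()-style permission change
 * keeps the bits covered by _PAGE_CHG_MASK (the PFN plus dirty,
 * accessed and special tracking) and swaps in everything else:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */
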
/*
 * Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * The 32-bit with 64-bit PTE case: here we can just store, as long
	 * as we do the two halves in the right order with a barrier in
	 * between.
	 * In the percpu case, we also fall back to the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			eieio\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}

	/*
	 * Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}
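
/*
 * Illustrative usage (sketch): __set_pte_at() is only the raw store;
 * normal code goes through the out-of-line wrapper declared above:
 *
 *	set_pte_at(mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL));
 */
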
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable",
 * write-combining, etc.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
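
/*
 * Illustrative usage (sketch of a typical driver mmap() handler, not
 * from this file):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 *
 * which yields a guarded, cache-inhibited mapping of device registers.
 */
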
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & 0x4) != 0);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge		pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif /* CONFIG_HUGETLB_PAGE */
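
/*
 * Illustrative sketch (hypothetical walker): a page-table walker can
 * recognise a huge-page directory before descending into it:
 *
 *	if (is_hugepd(__hugepd(pgd_val(pgd))))
 *		...	(treat the entry as a hugepd, not a page table)
 *
 * assuming the powerpc __hugepd() constructor for hugepd_t values.
 */
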
/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif
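
/*
 * Illustrative context (sketch): the generic fault path finishes with
 *
 *	update_mmu_cache(vma, address, ptep);
 *
 * which is a no-op here except on FSL Book3E with hugetlb, where the
 * out-of-line version above does the i-cache/d-cache coherency work.
 */
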
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */