#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page. We use 8 bytes per PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)
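
/*
 * Layout sketch (illustrative, not part of the original header): with
 * 8-byte PTE entries as the comment above implies, the hash-index word
 * that pairs with a given PTE sits PTE_PAGE_HIDX_OFFSET bytes further
 * into the PTE page, i.e. roughly:
 *
 *	unsigned long hidx = pte_val(*(ptep + PTRS_PER_PTE));
 */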

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
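
/*
 * Usage sketch (illustrative only): these accessors test bits without
 * modifying the entry, and pair with the pte_mk*() modifiers below, e.g.:
 *
 *	if (pte_dirty(pte))
 *		pte = pte_mkclean(pte);
 */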

#ifdef CONFIG_NUMA_BALANCING

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
}

#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT);
}

#define pte_numa pte_numa
static inline int pte_numa(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
}
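
/*
 * Encoding sketch (not from the original source): an entry counts as
 * "NUMA" only while _PAGE_NUMA is set and _PAGE_PRESENT is clear, which
 * is exactly the state pte_mknuma() below creates and pte_mknonnuma()
 * undoes:
 *
 *	pte = pte_mknuma(pte);     // pte_numa(pte) != 0, hash_page() will fault
 *	pte = pte_mknonnuma(pte);  // pte_numa(pte) == 0, pte_present_nonuma(pte) != 0
 */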

#define pte_mknonnuma pte_mknonnuma
static inline pte_t pte_mknonnuma(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_NUMA;
	pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
	return pte;
}

#define pte_mknuma pte_mknuma
static inline pte_t pte_mknuma(pte_t pte)
{
	/*
	 * We should not set _PAGE_NUMA on non-present PTEs. Also clear the
	 * present bit so that hash_page will return 1 and we collect this
	 * as a NUMA fault.
	 */
	if (pte_present(pte)) {
		pte_val(pte) |= _PAGE_NUMA;
		pte_val(pte) &= ~_PAGE_PRESENT;
	} else
		VM_BUG_ON(1);
	return pte;
}

#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
}

#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
	return pte_numa(pmd_pte(pmd));
}

#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
}

#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
	return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
}

#define pmd_mknuma pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
	return pte_pmd(pte_mknuma(pmd_pte(pmd)));
}

#else

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
#endif /* CONFIG_NUMA_BALANCING */

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
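
/*
 * Round-trip sketch (illustrative only): pfn_pte() packs the frame number
 * above PTE_RPN_SHIFT and pte_pfn() recovers it, so for any valid pfn:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	VM_BUG_ON(pte_pfn(pte) != pfn);
 */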

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
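
/*
 * Example (a sketch, not part of the original file): protection changes
 * keep the bits in _PAGE_CHG_MASK (the pfn plus state bits that must
 * survive a protection change) and replace the rest:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */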

/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level helper in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e., the same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}
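
/*
 * Call-path sketch (illustrative, not from the original source): fault
 * handlers go through the out-of-line wrapper rather than calling
 * __set_pte_at() directly:
 *
 *	pte_t pte = pte_mkdirty(mk_pte(page, vma->vm_page_prot));
 *	set_pte_at(vma->vm_mm, address, ptep, pte);
 */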

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable" or "cacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
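
/*
 * Typical use (a sketch, not part of the original file): a driver mapping
 * MMIO registers via mmap() makes the vma protection uncacheable and
 * guarded before calling remap_pfn_range():
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */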

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
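
/*
 * Invocation sketch (illustrative, not from the original source): generic
 * mm code calls this hook once the new PTE is visible, roughly
 *
 *	update_mmu_cache(vma, address, ptep);
 *
 * and on hash MMUs this is where the HPTE preload described above happens.
 */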

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr);

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif

pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);

static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
				       unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return NULL;
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;

	if (ps > *pte_sizep)
		return NULL;

	return ptep;
}

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */