#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */
#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
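/*
 * Worked example (values assume a 4KB PAGE_SIZE, i.e. PAGE_SHIFT == 12):
 * with 32-bit PTEs, PTE_SHIFT is 10, so PGDIR_SHIFT = 22 and PGDIR_SIZE is
 * 4MB; PGD_INDEX_SIZE = 10 gives a 1024-entry, one-page pgdir and 4KB PTE
 * pages.  With CONFIG_PTE_64BIT, PTE_SHIFT is 9, so PGDIR_SHIFT = 21,
 * PGD_INDEX_SIZE = 11 and PGD_TABLE_SIZE = 4 << 11 = 8KB, i.e. the
 * 2048-entry pgdir with 512-entry second-level tables described above.
 * With a TASK_SIZE of 0xc0000000, USER_PTRS_PER_PGD works out to 768 in
 * the 32-bit PTE case.
 */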
/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start layout kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif
/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
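/*
 * Worked example (addresses assume a PAGE_OFFSET of 0xc0000000, 256MB of
 * lowmem and no PPC_PIN_SIZE): high_memory would be 0xd0000000, so
 * VMALLOC_START = (0xd0000000 + 0x1000000) & ~0xffffff = 0xd1000000.
 * VMALLOC_END tracks ioremap_bot, which starts out at IOREMAP_TOP
 * (KVIRT_TOP, 0xfe000000UL here without HIGHMEM or a non-coherent cache)
 * and only moves down as early ioremaps are made, so the vmalloc/ioremap
 * space is roughly the 0xd1000000 - 0xfe000000 range in this configuration.
 */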
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);
/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%4)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
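/*
 * Usage sketch (flag names assumed from <asm/book3s/32/hash.h>): a typical
 * caller clears or sets status bits in one atomic step and inspects the
 * returned old value, e.g.
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *
 * atomically clears the accessed bit while preserving everything else;
 * __ptep_test_and_clear_young() below does exactly this and then flushes
 * the matching hash table entry if _PAGE_HASHPTE was set.
 */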
/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
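/*
 * Usage sketch (the upper levels are folded away by
 * <asm-generic/pgtable-nopmd.h> and the 5-level hack included above, so
 * this is a walk in name only):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	... examine or update *pte ...
 *	pte_unmap(pte);
 *
 * pte_offset_map()/pte_unmap() pair a kmap_atomic() of the PTE page, so
 * the mapped pointer must not be held across a sleep.
 */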
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
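/*
 * Worked example (bit values assumed from <asm/book3s/32/hash.h>): for swap
 * type 2 and offset 0x100, __swp_entry(2, 0x100) gives val = 2 | (0x100 << 5)
 * = 0x2002, and __swp_entry_to_pte() stores it as 0x2002 << 3 = 0x10010.
 * The left shift by 3 keeps the low PTE bits - which on this platform hold
 * _PAGE_PRESENT and _PAGE_HASHPTE - clear, as required by the comment above.
 */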
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
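/*
 * Usage sketch: protection changes (e.g. mprotect()) go through
 * pte_modify(), typically as
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 *
 * which replaces the protection bits while keeping everything covered by
 * _PAGE_CHG_MASK (defined in <asm/pte-common.h>; assumed here to cover the
 * PFN plus bookkeeping bits such as dirty/accessed/special and
 * _PAGE_HASHPTE).
 */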
/* This low level function performs the actual PTE insertion
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fallback to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");
#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported "
#endif
}
/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}
#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}
#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
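/*
 * Usage sketch: drivers normally reach these helpers through the generic
 * pgprot_*() interfaces, e.g. something like
 *
 *	pgprot_t mmio = pgprot_noncached(PAGE_KERNEL);
 *	pgprot_t fb   = pgprot_writecombine(PAGE_KERNEL);
 *
 * giving a cache-inhibited, guarded mapping for device registers and a
 * cache-inhibited but non-guarded one for things like frame buffers, per
 * the definitions above.
 */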
#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */