/* arch/powerpc/include/asm/nohash/64/pgtable-64k.h */

#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>

#define PTE_INDEX_SIZE 8
#define PMD_INDEX_SIZE 10
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE 12
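
/*
 * With 64K pages (PAGE_SHIFT = 16) these index sizes give a three-level
 * tree (the PUD level is folded, PUD_INDEX_SIZE = 0):
 * 16 + 8 + 10 + 12 = 46 address bits, i.e. a 64TB virtual address space.
 */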

/*
 * we support 32 fragments per PTE page of 64K size
 */
#define PTE_FRAG_NR 32

/*
 * We use a 2K PTE page fragment and another 2K for storing
 * real_pte_t hash index
 */
#define PTE_FRAG_SIZE_SHIFT 11
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
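/* 1UL << 11 = 2K, so a 64K page holds 64K / 2K = 32 fragments (PTE_FRAG_NR) */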

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE PTE_FRAG_SIZE
#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PUD_TABLE_SIZE (0)
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
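/*
 * Assuming 8-byte table entries (as on ppc64): PTE table = one 2K
 * fragment, PMD table = 8 << 10 = 8K, PGD table = 8 << 12 = 32K.
 */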
#endif /* __ASSEMBLY__ */

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
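/* i.e. 256 PTEs, 1024 PMDs and 4096 PGDs per table */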

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
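/* PMD_SHIFT = 16 + 8 = 24, so one PMD entry maps a 16MB region */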

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
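/* PGDIR_SHIFT = 24 + 10 = 34: one PGD entry maps 16GB, 4096 of them cover 64TB */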

/*
 * Bits to mask out from a PMD to get to the PTE page
 * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
 */
#define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1)
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS 0x1ff
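/* 0x1ff masks the low 9 bits, which implies PMD tables are 512-byte aligned */

/*
 * The PUD level is folded away by pgtable-nopud.h, so pgd_pte()/pte_pgd()
 * below simply route through the pud_pte()/pte_pud() helpers.
 */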

#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
#define pte_pgd(pte) ((pgd_t)pte_pud(pte))

#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */