arch/powerpc/include/asm/book3s/64/pgtable-64k.h
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
 * a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD;
 *
 * Defined in such a way that we can optimize away these code blocks at
 * build time if CONFIG_HUGETLB_PAGE=n.
 */
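/*
 * Worked example of the sizes above (a sketch; it assumes the usual 64K
 * hash config values PTE_INDEX_SIZE = 8, PMD_INDEX_SIZE = 5 and
 * PUD_INDEX_SIZE = 5, which are defined elsewhere, not in this file):
 *
 *   a PMD entry maps 2^(PAGE_SHIFT + PTE_INDEX_SIZE) = 2^(16 + 8) = 16MB
 *   a PGD entry maps 2^(16 + 8 + 5 + 5)              = 2^34       = 16GB
 *
 * so a leaf pte installed at the PMD level is a 16MB hugepage and a leaf
 * pte installed at the PGD level is a 16GB hugepage.
 */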
static inline int pmd_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page
	 */
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}

static inline int pud_huge(pud_t pud)
{
	/*
	 * leaf pte for huge page
	 */
	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}

static inline int pgd_huge(pgd_t pgd)
{
	/*
	 * leaf pte for huge page
	 */
	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
}
#define pgd_huge pgd_huge
/*
 * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We
 * don't need to set up a hugepage directory for them. Our pte and page
 * directory format enables us to have this enabled.
 */
static inline int hugepd_ok(hugepd_t hpd)
{
	return 0;
}
#define is_hugepd(pdep)			0
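/*
 * For comparison: the 4K page size variant of this header does use hugepage
 * directories, so its hugepd_ok() inspects the entry rather than returning 0
 * unconditionally.
 */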

#else /* !CONFIG_HUGETLB_PAGE */
static inline int pmd_huge(pmd_t pmd) { return 0; }
static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */

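/*
 * remap_4k_pfn() remaps a 4K pfn within a 64K linux page. This relies on the
 * hash MMU's ability to back a 64K page with 4K HPTEs (hash__remap_4k_pfn());
 * there is no radix implementation, hence the BUG() below.
 */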
static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
			       unsigned long pfn, pgprot_t prot)
{
	if (radix_enabled())
		BUG();
	return hash__remap_4k_pfn(vma, addr, pfn, prot);
}
#endif /* __ASSEMBLY__ */
#endif /*_ASM_POWERPC_BOOK3S_64_PGTABLE_64K_H */