#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>
static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
#endif
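
/*
 * With CONFIG_PARAVIRT disabled, the paravirt_*() hooks above compile
 * down to empty inline stubs, so native kernels pay nothing for the
 * notification points used by the allocation helpers below.
 */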
/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
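
/*
 * For illustration only: generic mm code pairs these allocators with
 * the populate helpers below, roughly (cf. __pte_alloc() in
 * mm/memory.c):
 *
 *	pgtable_t new = pte_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	...
 *	pmd_populate(mm, pmd, new);	(under the page table lock)
 *
 * with pte_free() on the error and lost-race paths.
 */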
/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
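
/*
 * pgtable_page_dtor() undoes the pgtable_page_ctor() that
 * pte_alloc_one() is expected to have run on the page: it releases
 * the split page-table lock, if any, and drops the page-table
 * accounting for the page.
 */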
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}
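
/*
 * Freeing through the mmu_gather defers the actual release until the
 * corresponding TLB entries have been flushed, so a concurrent or
 * speculative walk can never land in a page table that has already
 * been reused.
 */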
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)
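
/*
 * A sketch of what the helpers above write: the entry is simply the
 * physical address of the pte page plus the _PAGE_TABLE permission
 * bits (present, writable, user, accessed, dirty). E.g. for a pte
 * page at pfn 0x1234 with 4K pages:
 *
 *	((pteval_t)0x1234 << PAGE_SHIFT) | _PAGE_TABLE
 *		== 0x1234000 | _PAGE_TABLE
 *
 * The (pteval_t) cast matters on 32-bit PAE, where a page above 4GB
 * would otherwise have its address truncated by the shift.
 */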
#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
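
/*
 * GFP_KERNEL_ACCOUNT charges the page to the current memory cgroup;
 * kernel page tables (init_mm) are deliberately left unaccounted.
 * pgtable_pmd_page_ctor() can fail when split page-table locks need a
 * separately allocated spinlock, hence the second error path.
 */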
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}

extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */
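
/*
 * The PAE variant is out of line (see arch/x86/mm/pgtable.c) because
 * updating the top level needs more than a plain store there: the
 * processor may have cached the PDPT entries when %cr3 was loaded, so
 * the write has to be paired with a TLB flush to take effect.
 */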
#if CONFIG_PGTABLE_LEVELS > 3
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (pud_t *)get_zeroed_page(gfp);
}
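
/*
 * Unlike the pte and pmd levels there is no ctor/dtor pairing here:
 * split page-table locks exist only for those two levels, so a pud
 * page is just a plain zeroed page.
 */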
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */