#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Book-keeping for the memory blocks used to back the virtual memmap. */
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
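
/*
 * To illustrate the encoding (a sketch; the local names below are only
 * for exposition): because every page table is aligned to at least
 * (MAX_PGTABLE_INDEX_SIZE + 1) bytes, the low four bits of a table
 * pointer are free to carry its index_size:
 *
 *	unsigned long pgf = (unsigned long)table | shift;
 *	void *t    = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);
 *	unsigned s = pgf & MAX_PGTABLE_INDEX_SIZE;
 *
 * This is the round trip that pgtable_free_tlb() and
 * __tlb_remove_table() below perform across the deferred page-table
 * freeing path (tlb_remove_table()).
 */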

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = pte_alloc_one_kernel(mm, address);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	pgtable_page_ctor(page);
	return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

/* index_size == 0 means a full PTE page; anything else came from PGT_CACHE(). */
static inline void pgtable_free(void *table, unsigned index_size)
{
	if (!index_size)
		free_page((unsigned long)table);
	else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}

#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;		/* stash the index size in the low bits */
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else /* !CONFIG_SMP */
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif /* CONFIG_SMP */

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_page_dtor(table);
	pgtable_free_tlb(tlb, page_address(table), 0);
}

#else /* if CONFIG_PPC_64K_PAGES */
/*
 * we support 16 fragments per PTE page.
 */
#define PTE_FRAG_NR	16
/*
 * We use a 2K PTE page fragment and another 2K for storing
 * real_pte_t hash index
 */
#define PTE_FRAG_SIZE_SHIFT	12
#define PTE_FRAG_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))
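
/*
 * Fragment arithmetic, as an illustration (assuming the usual 64K hash
 * configuration, where PTRS_PER_PTE is 256 and sizeof(pte_t) is 8):
 *
 *	PTE_FRAG_SIZE = 2 * 256 * 8 = 4096 = 1 << PTE_FRAG_SIZE_SHIFT
 *
 * i.e. 2K of PTEs plus 2K of real_pte_t hash-index state per fragment,
 * and PTE_FRAG_NR (16) such fragments exactly fill one 64K page handed
 * out by page_table_alloc() below.
 */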

extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
extern void page_table_free(struct mm_struct *, unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, (unsigned long)pte_page);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)page_table_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)page_table_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	page_table_free(mm, (unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	page_table_free(mm, (unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}
#endif /* CONFIG_PPC_64K_PAGES */

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)

#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */