#ifndef _ASM_POWERPC_PGALLOC_32_H
#define _ASM_POWERPC_PGALLOC_32_H

#include <linux/threads.h>
#include <linux/slab.h>

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
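
/*
 * Illustrative example (values not from the original source): the
 * 0xf mask implies 16-byte alignment, so the low 4 bits of every
 * table pointer are zero.  A table at 0xc0f0a000 with index_size 3
 * packs into the single token 0xc0f0a003; the pointer is recovered
 * as (token & ~0xf) == 0xc0f0a000 and the size as (token & 0xf) == 3,
 * which is exactly what __tlb_remove_table() below does.
 */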

extern void __bad_pte(pmd_t *pmd);

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
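
/*
 * Note: the caches are indexed by (shift - 1), so index_size 0 (a
 * plain PTE page from get_free_pages()) never reaches PGT_CACHE();
 * see pgtable_free() below for where that distinction is made.
 */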

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present.
 */
/* #define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, a)	do { } while (0)
/* #define pgd_populate(mm, pmd, pte)	BUG() */

#ifndef CONFIG_BOOKE

/* Classic 32-bit MMUs keep the physical address of the PTE page in the pmd */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#else

/* Book-E MMUs keep the kernel virtual address of the PTE page in the pmd */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
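
/*
 * The two pmd_populate*() variants above differ only in what the pmd
 * entry holds: a physical address on classic 32-bit MMUs versus a
 * kernel virtual address on Book-E, which appears to let Book-E's
 * software TLB-miss handlers walk the tables through the kernel's
 * linear mapping.
 */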

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

static inline void pgtable_free(void *table, unsigned index_size)
{
	if (!index_size) {
		/* index_size 0 means a PTE page from get_free_pages() */
		free_page((unsigned long)table);
	} else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}
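
/*
 * Usage sketch (illustrative, not from the original source):
 * pte_free_kernel() above is equivalent to pgtable_free(pte, 0),
 * and a pgd could be released with pgtable_free(pgd, PGD_INDEX_SIZE),
 * matching the kmem_cache used by pgd_alloc()/pgd_free().
 */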

#define check_pgt_cache()	do { } while (0)
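
/*
 * On SMP the actual free must be deferred: tlb_remove_table() batches
 * page-table pages and releases them only once no other CPU can still
 * be walking them.  Since tlb_remove_table() carries a single pointer,
 * pgtable_free_tlb() packs the table's index_size into the low bits
 * that the alignment rules above guarantee to be zero, and
 * __tlb_remove_table() unpacks it again.
 */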
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_page_dtor(table);
	/* PTE pages always use index_size 0 */
	pgtable_free_tlb(tlb, page_address(table), 0);
}

#endif /* _ASM_POWERPC_PGALLOC_32_H */