#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
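
/*
 * Illustrative sketch (for exposition only, not an interface defined
 * here): because MAX_PGTABLE_INDEX_SIZE is one less than a power of
 * two and every table is at least that strongly aligned, an index
 * size can be packed into the low bits of a table pointer and
 * recovered with the same mask.  The names "packed", "shift" and
 * "tbl" are hypothetical:
 *
 *	unsigned long packed = (unsigned long)table | index_size;
 *	unsigned int shift = packed & MAX_PGTABLE_INDEX_SIZE;
 *	void *tbl = (void *)(packed & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
 *
 * A non-zero shift can then be mapped back to its source cache with
 * PGT_CACHE(shift), while shift == 0 denotes a PTE page drawn from
 * the page allocator (see pgtable_free() below).
 */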
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
#else /* CONFIG_PPC_64K_PAGES */

#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif /* CONFIG_PPC_64K_PAGES */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = pte_alloc_one_kernel(mm, address);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	/* initialise the struct page for use as a page table page */
	pgtable_page_ctor(page);
	return page;
}
static inline void pgtable_free(void *table, unsigned index_size)
{
	/* index_size 0 means a PTE page from the get_free_pages() pool */
	if (!index_size)
		free_page((unsigned long)table);
	else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}
#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)

#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */