/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

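/*
 * s390 keeps no quicklist cache of preconstructed page tables, so the
 * generic code's periodic check_pgt_cache() trim has nothing to do.
 */
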
/*
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	0
# define PGD_ALLOC_ORDER	1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	2
# define PGD_ALLOC_ORDER	2
#endif /* __s390x__ */

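/*
 * An order-n allocation hands back 2^n physically contiguous pages, so
 * __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER) below returns 8KB for
 * the 31-bit pgd (order 1) and 16KB for the 64-bit pgd and pmd tables
 * (order 2), given the 4KB s390 PAGE_SIZE.
 */
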
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

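/*
 * On machines without hardware execute protection, s390_noexec is set
 * and every page table is paired with a "shadow" table that backs a
 * second, non-executable address space. The allocation routines below
 * allocate the shadow alongside the primary table and stash its
 * address in the primary table's struct page (page->lru.next), where
 * the get_shadow_*() helpers can find it again when the table is
 * populated or freed.
 */
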
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
	int i;

	if (!pgd)
		return NULL;
	if (s390_noexec) {
		pgd_t *shadow_pgd = (pgd_t *)
			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
		struct page *page = virt_to_page(pgd);

		if (!shadow_pgd) {
			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pgd;
	}
	for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
		pgd_clear(pgd + i);
#endif
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);

	if (shadow_pgd)
		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

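/*
 * get_shadow_pgd()/get_shadow_pmd()/get_shadow_pte() are defined
 * elsewhere (asm-s390/pgtable.h in this kernel). A minimal sketch,
 * assuming they simply read back the link stored by the allocators
 * above (the real helpers may differ):
 *
 *	static inline pgd_t *get_shadow_pgd(pgd_t *pgd)
 *	{
 *		if (!s390_noexec)
 *			return NULL;
 *		return (pgd_t *) virt_to_page(pgd)->lru.next;
 *	}
 */
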
#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#define pgd_populate_kernel(mm, pmd, pte)	BUG()
#else /* __s390x__ */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
	int i;

	if (!pmd)
		return NULL;
	if (s390_noexec) {
		pmd_t *shadow_pmd = (pmd_t *)
			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
		struct page *page = virt_to_page(pmd);

		if (!shadow_pmd) {
			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pmd;
	}
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear(pmd + i);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pmd)
		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

#define __pmd_free_tlb(tlb,pmd)			\
	do {					\
		tlb_flush_mmu(tlb, 0, 0);	\
		pmd_free(pmd);			\
	} while (0)

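/*
 * The mmu_gather may still carry deferred flushes for pages mapped
 * under this pmd; force them out with tlb_flush_mmu() before the
 * table is freed and potentially reused.
 */
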
static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pgd && shadow_pmd)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
	pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

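/*
 * An s390 hardware page table has 256 entries and maps a 1MB segment.
 * On 31-bit, where ptes are 4 bytes, a 4KB pte page therefore holds
 * four such tables, and one "pmd entry" below is really four
 * consecutive segment-table entries pointing at the four quarters
 * (pte, pte+256, ...). On 64-bit, with 8-byte ptes, the page holds
 * two tables, and a pmd_t bundles two words (pmd_val/pmd_val1), one
 * for each half.
 */
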
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *)page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
	int i;

	if (!pte)
		return NULL;
	if (s390_noexec) {
		pte_t *shadow_pte = (pte_t *)
			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		struct page *page = virt_to_page(pte);

		if (!shadow_pte) {
			free_page((unsigned long) pte);
			return NULL;
		}
		page->lru.next = (void *) shadow_pte;
	}
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_clear(mm, vmaddr, pte + i);
		vmaddr += PAGE_SIZE;
	}
	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);

	if (pte)
		return virt_to_page(pte);
	return NULL;
}

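/*
 * pte_alloc_one() returns the struct page rather than the kernel
 * address; pte_free() and __pte_free_tlb() below take that same
 * struct page and locate the shadow with get_shadow_page(), which is
 * expected to follow the page->lru.next link set up in
 * pte_alloc_one_kernel() above.
 */
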
static inline void pte_free_kernel(pte_t *pte)
{
	pte_t *shadow_pte = get_shadow_pte(pte);

	if (shadow_pte)
		free_page((unsigned long) shadow_pte);
	free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
	struct page *shadow_page = get_shadow_page(pte);

	if (shadow_page)
		__free_page(shadow_page);
	__free_page(pte);
}

#define __pte_free_tlb(tlb, pte)					\
({									\
	struct mmu_gather *__tlb = (tlb);				\
	struct page *__pte = (pte);					\
	struct page *shadow_page = get_shadow_page(__pte);		\
	if (shadow_page)						\
		tlb_remove_page(__tlb, shadow_page);			\
	tlb_remove_page(__tlb, __pte);					\
})

#endif /* _S390_PGALLOC_H */
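
/*
 * Usage sketch (not part of the original header): the generic mm code
 * drives these helpers roughly as follows when it builds up a process
 * address space. Error handling is omitted, and pmd_alloc_one()/
 * pgd_populate() are real functions only on 64-bit (__s390x__):
 *
 *	pgd_t *pgd = pgd_alloc(mm);
 *	pmd_t *pmd = pmd_alloc_one(mm, addr);
 *	pgd_populate(mm, pgd + pgd_index(addr), pmd);
 *	struct page *pte_page = pte_alloc_one(mm, addr);
 *	pmd_populate(mm, pmd_offset(pgd + pgd_index(addr), addr), pte_page);
 */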