/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */
#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
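
/*
 * Fill a page table with an initial entry value: the first entry is
 * stored directly, then the inline mvc sequence replicates it across
 * the rest of the table in 256-byte blocks.
 */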
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}
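
/*
 * Initialize a region/segment (crst) table with empty entries. If the
 * table has a shadow (no-execute emulation), initialize it as well.
 */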
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}
#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}
#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()
#else /* __s390x__ */

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}
int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);
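
/*
 * Allocate a crst table and preset it with empty region-third (pud)
 * or segment (pmd) entries, respectively.
 */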
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
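
/*
 * The populate functions set the given table entry and, if the mm uses
 * the no-execute emulation, repeat the update in the shadow tables.
 */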
static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_populate_kernel(mm, pgd, pud);
	if (mm->context.noexec) {
		pgd = get_shadow_table(pgd);
		pud = get_shadow_table(pud);
		pgd_populate_kernel(mm, pgd, pud);
	}
}
static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_populate_kernel(mm, pud, pmd);
	if (mm->context.noexec) {
		pud = get_shadow_table(pud);
		pmd = get_shadow_table(pmd);
		pud_populate_kernel(mm, pud, pmd);
	}
}
#endif /* __s390x__ */
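
/*
 * Allocate a new page directory and initialize the crst and page table
 * lists used by the s390 page table allocator.
 */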
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
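
/*
 * Hook a pte table into a segment table entry. With the no-execute
 * emulation the shadow pte table starts PTRS_PER_PTE entries after
 * the primary one.
 */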
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_populate_kernel(mm, pmd, pte);
	if (mm->context.noexec) {
		pmd = get_shadow_table(pmd);
		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
	}
}
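
/* Get the page table base address from a segment table entry. */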
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
#endif /* _S390_PGALLOC_H */