/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
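/*
 * Fill a page table with an initial entry value: the first mvc
 * propagates the value stored at *s through the first 256-byte block,
 * the mvc/la/brct loop then copies that block over the remaining
 * (n / 256) - 1 blocks of the table.
 */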
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}
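
/*
 * On 31-bit the kernel uses a two-level page table, so the pud and
 * pmd levels are folded: allocating or populating them is a bug.
 */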
#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()

#else /* __s390x__ */
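
/*
 * On 64-bit the top level table type depends on the address space
 * limit of the mm: up to 2GB a segment table is the top level, up to
 * 4TB a region-third table, beyond that a region-second table.
 * crst_table_upgrade/crst_table_downgrade switch between these layouts.
 */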
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);
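
/*
 * pud and pmd tables are full crst tables; the noexec flag is passed
 * to crst_table_alloc() so that a shadow table can be set up as well.
 */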
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
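
/*
 * The populate helpers store the physical address of the lower level
 * table into the higher level entry; with noexec enabled the shadow
 * tables are linked together in the same way.
 */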
static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_populate_kernel(mm, pgd, pud);
	if (mm->context.noexec) {
		pgd = get_shadow_table(pgd);
		pud = get_shadow_table(pud);
		pgd_populate_kernel(mm, pgd, pud);
	}
}

static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_populate_kernel(mm, pud, pmd);
	if (mm->context.noexec) {
		pud = get_shadow_table(pud);
		pmd = get_shadow_table(pmd);
		pud_populate_kernel(mm, pud, pmd);
	}
}

#endif /* __s390x__ */
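
/*
 * Allocate the top level table and set up the per-mm lists that track
 * crst and page tables.
 */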
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	spin_lock_init(&mm->context.list_lock);
	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
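
/*
 * With noexec active a pte page carries a second set of PTRS_PER_PTE
 * entries; pmd_populate() links that second half into the shadow
 * segment table.
 */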
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_populate_kernel(mm, pmd, pte);
	if (mm->context.noexec) {
		pmd = get_shadow_table(pmd);
		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
	}
}

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

#endif /* _S390_PGALLOC_H */