arch/s390/mm/pgalloc.c
/*
 * Page table allocation functions
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
static struct ctl_table page_table_sysctl[] = {
        {
                .procname = "allocate_pgste",
                .data = &page_table_allocate_pgste,
                .maxlen = sizeof(int),
                .mode = S_IRUGO | S_IWUSR,
                .proc_handler = proc_dointvec,
                .extra1 = &page_table_allocate_pgste_min,
                .extra2 = &page_table_allocate_pgste_max,
        },
        { }
};
static struct ctl_table page_table_sysctl_dir[] = {
        {
                .procname = "vm",
                .maxlen = 0,
                .mode = 0555,
                .child = page_table_sysctl,
        },
        { }
};
static int __init page_table_register_sysctl(void)
{
        return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
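/*
 * Usage note (added commentary, not from the original source): once the
 * initcall above has run, the flag is reachable as
 * /proc/sys/vm/allocate_pgste, e.g.
 *
 *      echo 1 > /proc/sys/vm/allocate_pgste
 *      cat /proc/sys/vm/allocate_pgste
 *
 * Enabling it makes every new mm allocate full 4K pte+pgste page tables,
 * which KVM needs for guest mms.  The 0..1 bounds are only declared through
 * extra1/extra2 here; proc_dointvec does not enforce them by itself
 * (proc_dointvec_minmax would).
 */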
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, 2);
}
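/*
 * Added note: crst (region/segment) tables are allocated with order 2,
 * i.e. four pages or 16KB, enough for the 2048 eight-byte entries of one
 * s390 region or segment table.
 */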
static void __crst_table_upgrade(void *arg)
{
        struct mm_struct *mm = arg;

        if (current->active_mm == mm) {
                clear_user_asce();
                set_user_asce(mm);
        }
        __tlb_flush_local();
}
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
        unsigned long *table, *pgd;
        int rc, notify;

        /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
        BUG_ON(mm->context.asce_limit < (1UL << 42));
        if (end >= TASK_SIZE_MAX)
                return -ENOMEM;
        rc = 0;
        notify = 0;
        while (mm->context.asce_limit < end) {
                table = crst_table_alloc(mm);
                if (!table) {
                        rc = -ENOMEM;
                        break;
                }
                spin_lock_bh(&mm->page_table_lock);
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit == (1UL << 42)) {
                        crst_table_init(table, _REGION2_ENTRY_EMPTY);
                        p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
                        mm->pgd = (pgd_t *) table;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
                } else {
                        crst_table_init(table, _REGION1_ENTRY_EMPTY);
                        pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
                        mm->pgd = (pgd_t *) table;
                        mm->context.asce_limit = -PAGE_SIZE;
                        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
                }
                notify = 1;
                spin_unlock_bh(&mm->page_table_lock);
        }
        if (notify)
                on_each_cpu(__crst_table_upgrade, mm, 0);
        return rc;
}
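/*
 * Added note on the limits used above (my reading of this kernel version):
 * 1UL << 42 (4TB) is the reach of a three-level, region-third table,
 * 1UL << 53 (8PB) that of a four-level, region-second table, and -PAGE_SIZE
 * effectively the full 64-bit space covered by a five-level, region-first
 * table.  Each loop iteration adds one table level on top of the current
 * pgd until "end" fits, and on_each_cpu() lets every CPU running this mm
 * reload the new ASCE via __crst_table_upgrade().
 */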
void crst_table_downgrade(struct mm_struct *mm)
{
        pgd_t *pgd;

        /* downgrade should only happen from 3 to 2 levels (compat only) */
        BUG_ON(mm->context.asce_limit != (1UL << 42));

        if (current->active_mm == mm) {
                clear_user_asce();
                __tlb_flush_mm(mm);
        }

        pgd = mm->pgd;
        mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
        mm->context.asce_limit = 1UL << 31;
        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
        crst_table_free(mm, (unsigned long *) pgd);

        if (current->active_mm == mm)
                set_user_asce(mm);
}
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}
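/*
 * Added note: atomic_xor_bits() emulates an atomic XOR with a cmpxchg
 * retry loop and returns the new value.  The code below reuses
 * page->_mapcount of a page table page as a small bit field describing
 * the state of its two 2K fragments.
 */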
#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
        struct page *page;
        unsigned long *table;

        page = alloc_page(GFP_KERNEL);
        if (page) {
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
                clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        }
        return page;
}

void page_table_free_pgste(struct page *page)
{
        __free_page(page);
}

#endif /* CONFIG_PGSTE */
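/*
 * Added note: a page table with PGSTEs takes a whole 4K page; the lower 2K
 * holds the 256 pte entries (preset to _PAGE_INVALID), the upper 2K the
 * matching page status table entries used by KVM and the gmap code
 * (preset to zero).
 */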
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
        unsigned long *table;
        struct page *page;
        unsigned int mask, bit;

        /* Try to get a fragment of a 4K page as a 2K page table */
        if (!mm_alloc_pgste(mm)) {
                table = NULL;
                spin_lock_bh(&mm->context.pgtable_lock);
                if (!list_empty(&mm->context.pgtable_list)) {
                        page = list_first_entry(&mm->context.pgtable_list,
                                                struct page, lru);
                        mask = atomic_read(&page->_mapcount);
                        mask = (mask | (mask >> 4)) & 3;
                        if (mask != 3) {
                                table = (unsigned long *) page_to_phys(page);
                                bit = mask & 1;  /* =1 -> second 2K */
                                if (bit)
                                        table += PTRS_PER_PTE;
                                atomic_xor_bits(&page->_mapcount, 1U << bit);
                                list_del(&page->lru);
                        }
                }
                spin_unlock_bh(&mm->context.pgtable_lock);
                if (table)
                        return table;
        }
        /* Allocate a fresh page */
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        /* Initialize page table */
        table = (unsigned long *) page_to_phys(page);
        if (mm_alloc_pgste(mm)) {
                /* Return 4K page table with PGSTEs */
                atomic_set(&page->_mapcount, 3);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
                clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        } else {
                /* Return the first 2K fragment of the page */
                atomic_set(&page->_mapcount, 1);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE);
                spin_lock_bh(&mm->context.pgtable_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
                spin_unlock_bh(&mm->context.pgtable_lock);
        }
        return table;
}
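/*
 * Added note on the page->_mapcount encoding used by the allocation and
 * free paths (my reading of this version): bits 0 and 1 mark the lower
 * resp. upper 2K fragment as allocated, bits 4 and 5 mark a fragment as
 * queued for deferred (RCU) removal, the value 3 denotes a full 4K
 * pte+pgste page, and -1 a free page.  The "(mask | (mask >> 4)) & 3"
 * test above thus treats a fragment that is still waiting for its grace
 * period as busy and never hands it out again early.
 */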
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (!mm_alloc_pgste(mm)) {
                /* Free 2K page table fragment of a 4K page */
                bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
                spin_lock_bh(&mm->context.pgtable_lock);
                mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
                if (mask & 3)
                        list_add(&page->lru, &mm->context.pgtable_list);
                else
                        list_del(&page->lru);
                spin_unlock_bh(&mm->context.pgtable_lock);
                if (mask != 0)
                        return;
        }

        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        __free_page(page);
}
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
                         unsigned long vmaddr)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (mm_alloc_pgste(mm)) {
                gmap_unlink(mm, table, vmaddr);
                table = (unsigned long *) (__pa(table) | 3);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
        spin_lock_bh(&mm->context.pgtable_lock);
        mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
        if (mask & 3)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        else
                list_del(&page->lru);
        spin_unlock_bh(&mm->context.pgtable_lock);
        table = (unsigned long *) (__pa(table) | (1U << bit));
        tlb_remove_table(tlb, table);
}
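/*
 * Added note: the low two bits of the pointer passed to tlb_remove_table()
 * encode what is being freed, matching the switch in __tlb_remove_table()
 * below: 0 = an order-2 crst table (pmd/pud/p4d), 1 = the lower 2K
 * fragment, 2 = the upper 2K fragment, 3 = a full 4K page table with
 * PGSTEs.
 */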
static void __tlb_remove_table(void *_table)
{
        unsigned int mask = (unsigned long) _table & 3;
        void *table = (void *)((unsigned long) _table ^ mask);
        struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

        switch (mask) {
        case 0:         /* pmd, pud, or p4d */
                free_pages((unsigned long) table, 2);
                break;
        case 1:         /* lower 2K of a 4K page table */
        case 2:         /* higher 2K of a 4K page table */
                if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
                        break;
                /* fallthrough */
        case 3:         /* 4K page table with pgstes */
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
                break;
        }
}
static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        tlb->mm->context.flush_mm = 1;
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        __tlb_flush_mm_lazy(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_flush_mmu(tlb);
}
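/*
 * Added note: tlb_remove_table() collects table pointers in an
 * mmu_table_batch page and tlb_table_flush() frees the whole batch via
 * __tlb_remove_table() only after an RCU-sched grace period
 * (call_rcu_sched), so lockless walkers running with interrupts disabled
 * cannot still see the tables.  If no batch page can be allocated
 * (GFP_NOWAIT), tlb_remove_table_one() falls back to an IPI broadcast
 * before freeing the single table right away.
 */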