arch/s390/mm/pgalloc.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
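
/*
 * Note: setting the "vm.allocate_pgste" sysctl below to 1 makes
 * page_table_alloc() hand out full 4K page tables with the PGSTE area
 * attached (see mm_alloc_pgste()), which is what KVM needs for its guests.
 * With the default of 0, user page tables are allocated as 2K fragments.
 */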

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
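
/*
 * Region and segment (CRST) tables are 16K: four pages holding 2048
 * eight-byte entries, hence the order-2 allocations below.
 */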

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}
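
/*
 * crst_table_upgrade() adds page table levels to an address space:
 * 3 -> 4 levels (region-second table on top) and/or 4 -> 5 levels
 * (region-first table on top). The new top-level table points to the old
 * pgd, the ASCE is rebuilt to match, and every CPU running this mm is told
 * to reload its user ASCE via __crst_table_upgrade().
 */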

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
			mm_inc_nr_puds(mm);
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
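
/*
 * A user page table has 256 pte entries and therefore needs only 2K. When
 * PGSTEs are in use another 2K of PGSTEs follows, so the full 4K page is
 * taken; otherwise two 2K page tables share one 4K page. The upper byte of
 * page->_refcount tracks the fragments: bits 0-1 of that byte mark which 2K
 * half is allocated, bits 4-5 mark a half that is queued for RCU removal
 * (see page_table_free_rcu() and __tlb_remove_table()). Pages with a free
 * half sit on mm->context.pgtable_list so the second half can be handed
 * out later.
 */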

unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_page_dtor(page);
	__free_page(page);
}
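
/*
 * page_table_free_rcu() is the variant used by the mmu_gather code: instead
 * of freeing the 2K fragment immediately it marks it as pending (bits 4-5
 * of the upper _refcount byte) and queues a tagged pointer through
 * tlb_remove_table(), so the memory is only reused after concurrent
 * lockless page table walkers are guaranteed to be done with it.
 */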

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
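
/*
 * The two low bits of the table pointer passed to tlb_remove_table() encode
 * what has to be freed: 0 for a 16K CRST table, 1 or 2 for the lower or
 * upper 2K half of a pte table page, and 3 for a full 4K page table with
 * PGSTEs. __tlb_remove_table() decodes the tag again after the grace period.
 */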

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_page_dtor(page);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
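
/*
 * Table pointers handed to tlb_remove_table() are collected in a per-gather
 * mmu_table_batch and only released via call_rcu_sched() from
 * tlb_table_flush(). If no batch page can be allocated,
 * tlb_remove_table_one() falls back to an IPI broadcast that synchronizes
 * with IRQ-disabled walkers before freeing the table directly.
 */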

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page, _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
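
/*
 * base_##NAME##_addr_end(addr, end) rounds addr up to the next NAME-sized
 * boundary and clamps the result to end; the "- 1" comparison keeps the
 * wrap-around case (end == 0 for the top of the address space) working.
 */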

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
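
/*
 * base_lra() uses the LRA (load real address) instruction to translate a
 * virtual kernel address through the current page tables. base_page_walk()
 * below stores that real address into each base pte, so the generated base
 * tables mirror the existing kernel mapping without relying on EDAT
 * features such as large pages.
 */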

static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
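
/*
 * Illustrative use only (not part of this file): a caller that needs a
 * non-EDAT asce for a buffer could do something like
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long) buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce to the I/O or service call interface ...
 *	base_asce_free(asce);
 *
 * where "buf" and "num_pages" are placeholders; as noted above, the asce
 * must never be attached to a CPU.
 */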