// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
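
/*
 * CRST (region and segment) tables have 2048 eight-byte entries and thus
 * occupy 16K, hence the order-2 page allocations below.
 */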

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}
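
/*
 * Extend the page table hierarchy of @mm until mm->context.asce_limit
 * covers @end, then let every CPU currently running with this mm reload
 * its user ASCE via __crst_table_upgrade().
 */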

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}
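
/*
 * Atomically toggle the given bits in an atomic_t and return the new value.
 * The upper byte of page->_refcount is (ab)used as a bit field for the 2K
 * page table fragments of a 4K page: the two low bits of that byte mark
 * which halves are allocated, the next two mark halves pending RCU removal.
 */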

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
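
/*
 * A page table on s390 has 256 entries and thus occupies only 2K, half a
 * page. Two page tables therefore share a 4K page; which halves are in use
 * is tracked in the upper byte of page->_refcount, and pages with a free
 * half are kept on mm->context.pgtable_list.
 */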

unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
						1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_page_dtor(page);
	__free_page(page);
}
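
/*
 * Deferred freeing of page tables through the mmu_gather batching. The two
 * low bits of the pointer handed to tlb_remove_table() tell
 * __tlb_remove_table() what to release after the grace period: 0 for a
 * CRST table, 1 or 2 for the lower or upper 2K fragment of a page table,
 * and 3 for a 4K page table with PGSTEs.
 */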

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_page_dtor(page);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}
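
/*
 * Base page tables are only 2K and therefore come from a dedicated slab
 * cache rather than the page allocator. The cache is created lazily on
 * the first base_asce_alloc() call.
 */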

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}