// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE
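
/*
 * The "allocate_pgste" sysctl below forces page_table_alloc() to hand out
 * full 4K page tables with PGSTEs for every mm instead of 2K fragments.
 * The PGSTE half carries per-page guest state, which is what KVM needs,
 * so hosts that run guests typically enable this (or get PGSTEs per mm
 * via mm_alloc_pgste()).
 */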

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}
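
/*
 * Run on each CPU after an asce upgrade: a CPU whose active_mm is the
 * upgraded mm reloads the user asce so that translation uses the new
 * top-level table; the local TLB is flushed in any case.
 */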

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
			mm_inc_nr_puds(mm);
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm_dec_nr_pmds(mm);
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
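
/*
 * A 2K pte table is either the lower or the upper half of a 4K page. The
 * upper byte of page->_refcount tracks the state of the two halves:
 * bits 24-25 mark a half as allocated, bits 28-29 mark a half as pending
 * removal (see page_table_free_rcu() and __tlb_remove_table()). Pages that
 * still have a free half are kept on mm->context.pgtable_list so that
 * page_table_alloc() can reuse them before allocating a fresh page.
 */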

unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_page_dtor(page);
	__free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_page_dtor(page);
		__free_page(page);
		break;
	}
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc)
		base_asce_free(asce);