// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
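
/*
 * The "allocate_pgste" sysctl below toggles page_table_allocate_pgste.
 * When set, page tables are allocated as full 4K pages with PGSTEs so
 * that the address space can later host KVM guests without its page
 * tables having to be reallocated.
 */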
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		/* minmax handler so the 0/1 bounds below are enforced */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};
static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};
static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
#endif /* CONFIG_PGSTE */
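
/*
 * CRST (region and segment) tables have 2048 entries of 8 bytes each,
 * i.e. 16K, hence the order-2 allocation below. The address handed back
 * to the caller is the one obtained via page_to_phys().
 */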
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}
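
/*
 * Each iteration of the loop above adds exactly one translation level on
 * top of the current topmost table: a 3-level mm (asce_limit ==
 * _REGION2_SIZE) gains a region-second table and a REGION2-type ASCE, a
 * 4-level mm gains a region-first table and a REGION1-type ASCE covering
 * the full address space. Once at least one level has been added, every
 * CPU is asked to pick up the new ASCE via __crst_table_upgrade().
 */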
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}
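
/*
 * The downgrade path serves 31-bit compat tasks only: the mm drops from a
 * region-third table to a bare segment table, the ASCE becomes a
 * SEGMENT-type one limited to _REGION3_SIZE, and the now unused region
 * table is returned through crst_table_free().
 */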
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
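
/*
 * The helper above toggles the given bits with a cmpxchg retry loop and
 * returns the resulting value. It is used to maintain the 2K-fragment
 * bookkeeping bits in page->_mapcount, where the read-modify-write itself
 * must not rely on a lock.
 */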
#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}
void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}
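
/*
 * The two helpers above handle page tables with PGSTEs: a full 4K page
 * holding 256 page table entries (initialized to _PAGE_INVALID) followed
 * by 256 page status table entries (zeroed), as needed for address
 * spaces that can run KVM guests.
 */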
#endif /* CONFIG_PGSTE */
/*
 * page table entry allocation/free routines.
 */
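
/*
 * Without PGSTEs one 4K page provides two independent 2K page tables of
 * 256 entries each. Which halves are in use is tracked in
 * page->_mapcount:
 *
 *   bits 0-1: the corresponding 2K half is allocated
 *   bits 4-5: the half was passed to page_table_free_rcu() and waits
 *             for the grace period to expire
 *
 * Pages that still have a free half sit on mm->context.pgtable_list.
 * Example: a fresh page starts with _mapcount == 1 (lower half in use);
 * allocating the upper half XORs in 2 giving 3, freeing the lower half
 * XORs out 1 giving 2, and only when the value drops back to 0 may the
 * page itself be released.
 */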
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}
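
/*
 * In the non-PGSTE case above a 2K half is only truly released once the
 * xor leaves _mapcount at 0, i.e. the sibling half is neither allocated
 * nor waiting for an RCU grace period. If the sibling is still allocated
 * the page goes back on pgtable_list; if only pending-free bits remain,
 * the final free happens later in __tlb_remove_table().
 */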
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
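
/*
 * The table pointer handed to tlb_remove_table() is tagged in its two low
 * bits: 3 marks a full 4K PGSTE table, 1 or 2 select the lower or upper
 * 2K half, and an untagged (0) pointer denotes an order-2 CRST table.
 * __tlb_remove_table() strips and decodes this tag.
 */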
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}
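
/*
 * The functions below implement the table batching used by the mmu_gather
 * code: tables to be freed are collected in an mmu_table_batch page and
 * passed to __tlb_remove_table() only after an RCU grace period
 * (call_rcu_sched), so that lockless page table walkers never see a table
 * disappear underneath them. If no batch page can be allocated, an IPI to
 * all CPUs in tlb_remove_table_one() provides an equivalent
 * synchronization point before the table is freed directly.
 */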
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}
static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}