Linux 4.18.8
arch/powerpc/mm/mmu_context_book3s64.c
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);

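/*
 * Allocate a context id in the range [min_id, max_id] from the shared
 * IDA. ida_get_new_above() may fail with -EAGAIN when it needs more
 * preallocated memory, in which case we preallocate and retry; an id
 * above max_id is handed back and reported as -ENOMEM.
 */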
static int alloc_context_id(int min_id, int max_id)
{
        int index, err;

again:
        if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
                return -ENOMEM;

        spin_lock(&mmu_context_lock);
        err = ida_get_new_above(&mmu_context_ida, min_id, &index);
        spin_unlock(&mmu_context_lock);

        if (err == -EAGAIN)
                goto again;
        else if (err)
                return err;

        if (index > max_id) {
                spin_lock(&mmu_context_lock);
                ida_remove(&mmu_context_ida, index);
                spin_unlock(&mmu_context_lock);
                return -ENOMEM;
        }

        return index;
}

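/*
 * Mark a specific context id as in use so the allocator will never
 * hand it out. Retries on -EAGAIN and warns if the id returned by the
 * IDA is not the one that was requested.
 */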
void hash__reserve_context_id(int id)
{
        int rc, result = 0;

        do {
                if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
                        break;

                spin_lock(&mmu_context_lock);
                rc = ida_get_new_above(&mmu_context_ida, id, &result);
                spin_unlock(&mmu_context_lock);
        } while (rc == -EAGAIN);

        WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

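/*
 * Allocate a user context id for the hash MMU. The upper bound depends
 * on whether the CPU supports 68-bit virtual addresses.
 */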
int hash__alloc_context_id(void)
{
        unsigned long max;

        if (mmu_has_feature(MMU_FTR_68_BIT_VA))
                max = MAX_USER_CONTEXT;
        else
                max = MAX_USER_CONTEXT_65BIT_VA;

        return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);

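/*
 * Initialise the hash MMU context of a new mm: allocate a context id
 * and set up slices, subpage protection and protection keys.
 */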
static int hash__init_new_context(struct mm_struct *mm)
{
        int index;

        index = hash__alloc_context_id();
        if (index < 0)
                return index;

        /*
         * The old code would re-promote on fork, we don't do that when using
         * slices as it could cause problems promoting slices that have been
         * forced down to 4K.
         *
         * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
         * explicitly against context.id == 0. This ensures that we properly
         * initialize context slice details for newly allocated mm's (which will
         * have id == 0) and don't alter context slice inherited via fork (which
         * will have id != 0).
         *
         * We should not be calling init_new_context() on init_mm. Hence a
         * check against 0 is OK.
         */
        if (mm->context.id == 0)
                slice_init_new_context_exec(mm);

        subpage_prot_init_new_context(mm);

        pkey_mm_init(mm);
        return index;
}

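/*
 * Initialise the radix MMU context of a new mm: allocate a PID and
 * install the mm's page table root in the process table.
 */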
static int radix__init_new_context(struct mm_struct *mm)
{
        unsigned long rts_field;
        int index, max_id;

        max_id = (1 << mmu_pid_bits) - 1;
        index = alloc_context_id(mmu_base_pid, max_id);
        if (index < 0)
                return index;

        /*
         * Set the process table entry.
         */
        rts_field = radix__get_tree_size();
        process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

        /*
         * Order the above store with subsequent update of the PID
         * register (at which point HW can start loading/caching
         * the entry) and the corresponding load by the MMU from
         * the L2 cache.
         */
        asm volatile("ptesync;isync" : : : "memory");

        mm->context.npu_context = NULL;

        return index;
}

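/*
 * Arch hook invoked when a new mm is created: take the hash or radix
 * path, record the resulting context id and reset the per-mm page
 * table fragment and coprocessor state.
 */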
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int index;

        if (radix_enabled())
                index = radix__init_new_context(mm);
        else
                index = hash__init_new_context(mm);

        if (index < 0)
                return index;

        mm->context.id = index;

        mm->context.pte_frag = NULL;
        mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_init(mm);
#endif
        atomic_set(&mm->context.active_cpus, 0);
        atomic_set(&mm->context.copros, 0);

        return 0;
}

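/* Give a single context id back to the IDA. */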
void __destroy_context(int context_id)
{
        spin_lock(&mmu_context_lock);
        ida_remove(&mmu_context_ida, context_id);
        spin_unlock(&mmu_context_lock);
}
EXPORT_SYMBOL_GPL(__destroy_context);

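/*
 * Release every context id recorded for this mm. On hash an mm can own
 * several ids; all of them, including the primary one, live in the
 * extended_id[] array.
 */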
static void destroy_contexts(mm_context_t *ctx)
{
        int index, context_id;

        spin_lock(&mmu_context_lock);
        for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
                context_id = ctx->extended_id[index];
                if (context_id)
                        ida_remove(&mmu_context_ida, context_id);
        }
        spin_unlock(&mmu_context_lock);
}

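/*
 * Drop the references still held on a partially used PTE fragment page
 * and free the page once the last fragment is gone.
 */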
static void pte_frag_destroy(void *pte_frag)
{
        int count;
        struct page *page;

        page = virt_to_page(pte_frag);
        /* drop all the pending references */
        count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
        /* We allow PTE_FRAG_NR fragments from a PTE page */
        if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
                pgtable_page_dtor(page);
                __free_page(page);
        }
}

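/* Same as pte_frag_destroy(), but for PMD fragment pages. */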
static void pmd_frag_destroy(void *pmd_frag)
{
        int count;
        struct page *page;

        page = virt_to_page(pmd_frag);
        /* drop all the pending references */
        count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
        /* We allow PMD_FRAG_NR fragments from a PMD page */
        if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
                pgtable_pmd_page_dtor(page);
                __free_page(page);
        }
}

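/* Free any PTE/PMD fragment pages still cached in the mm context. */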
static void destroy_pagetable_page(struct mm_struct *mm)
{
        void *frag;

        frag = mm->context.pte_frag;
        if (frag)
                pte_frag_destroy(frag);

        frag = mm->context.pmd_frag;
        if (frag)
                pmd_frag_destroy(frag);
}

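/*
 * Tear down the MMU context when an mm is freed: drop cached page
 * table fragments, return all context ids and mark the context
 * invalid.
 */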
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
        WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
        if (radix_enabled())
                WARN_ON(process_tb[mm->context.id].prtb0 != 0);
        else
                subpage_prot_free(mm);
        destroy_pagetable_page(mm);
        destroy_contexts(&mm->context);
        mm->context.id = MMU_NO_CONTEXT;
}

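/* Called at the start of exit_mmap(), before the address space is torn down. */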
void arch_exit_mmap(struct mm_struct *mm)
{
        if (radix_enabled()) {
                /*
                 * Radix doesn't have a valid bit in the process table
                 * entries. However we know that at least the P9 implementation
                 * will avoid caching an entry with an invalid RTS field,
                 * and 0 is invalid. So this will do.
                 *
                 * This runs before the "fullmm" tlb flush in exit_mmap,
                 * which does a RIC=2 tlbie to clear the process table
                 * entry. See the "fullmm" comments in tlb-radix.c.
                 *
                 * No barrier required here after the store because
                 * this process will do the invalidate, which starts with
                 * ptesync.
                 */
                process_tb[mm->context.id].prtb0 = 0;
        }
}

#ifdef CONFIG_PPC_RADIX_MMU
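/*
 * Point the PID register at the next mm's context id. POWER9 DD1 needs
 * extra isyncs and an ERAT flush around the update.
 */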
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
        if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
                isync();
                mtspr(SPRN_PID, next->context.id);
                isync();
                asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
        } else {
                mtspr(SPRN_PID, next->context.id);
                isync();
        }
}
#endif