arch/arm64/mm/context.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif
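
/*
 * mm->context.id holds the generation counter in the bits above asid_bits
 * and the hardware ASID in the bits below.  With CONFIG_UNMAP_KERNEL_AT_EL0
 * (KPTI) only every other ASID is handed out, so NUM_USER_ASIDS is halved
 * and asid2idx()/idx2asid() convert between a bitmap index and the even
 * hardware ASID of each pair; the odd sibling is left for the user half of
 * the split page tables (its bottom bit is set outside this file, on the
 * exception-return path).
 */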

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);
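
	/*
	 * ID_AA64MMFR0_EL1.ASIDBits: 0b0000 means 8-bit ASIDs and 0b0010
	 * means 16-bit ASIDs; any other value is unallocated, so fall back
	 * to the 8-bit minimum.
	 */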
	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * we support fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}
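
/*
 * Roll over to a new ASID generation.  Called with cpu_asid_lock held when
 * new_context() finds the bitmap exhausted: every CPU's currently active
 * (or previously reserved) ASID is carried over into the fresh bitmap, and
 * each CPU is marked as needing a local TLB flush before it next switches
 * context.
 */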
static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
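
/*
 * Allocate an ASID for @mm in the current generation.  Must be called with
 * cpu_asid_lock held; may trigger a rollover via flush_context() when the
 * bitmap is exhausted.
 */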
static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
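
/*
 * Called on the context-switch path (via switch_mm()): make sure @mm has an
 * ASID in the current generation, perform any TLB invalidation queued up by
 * a rollover, and finally install the new translation table.
 */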
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;
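
	/*
	 * cpu_set_reserved_ttbr0() parks TTBR0_EL1 on the empty zero page;
	 * when CnP (shared translations) is in use this is done up front,
	 * before the ASID and page table are switched below.
	 */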
	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;
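
	/*
	 * Slow path: take the lock and, if the generation in the upper bits
	 * of mm->context.id no longer matches asid_generation (the xor/shift
	 * below is non-zero in that case), allocate a fresh ASID.
	 */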
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
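
/*
 * Cavium erratum 27456 requires the I-cache to be invalidated after a
 * TTBRx_EL1 update; the ALTERNATIVE below patches the
 * "ic iallu; dsb nsh; isb" sequence in on affected parts and leaves the
 * three NOPs in place everywhere else.
 */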
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}
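
/*
 * Boot-time initialisation of the allocator.  Runs as an early_initcall(),
 * i.e. before secondary CPUs are brought online, so asid_bits reflects the
 * boot CPU and verify_cpu_asid_bits() polices the secondaries later.
 */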
static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);