/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
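/*
 * Overview of the allocator below: mm->context.id holds a 64-bit value
 * whose low asid_bits are the hardware ASID and whose upper bits are a
 * software "generation" counter. A bitmap (asid_map) tracks which ASIDs
 * are in use for the current generation; when it fills up, the generation
 * is bumped, the bitmap is rebuilt from the ASIDs still live on each CPU,
 * and every CPU is asked to flush its TLBs before its next context switch.
 */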
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif
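/*
 * Note: with CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI) each mm effectively consumes
 * an even/odd ASID pair, which is why NUM_USER_ASIDS is halved above. The
 * allocator hands out the even value; the odd sibling is left for use with
 * the user half of the mappings.
 */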
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}
/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * we support fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}
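/*
 * flush_context() is only called from new_context() with cpu_asid_lock
 * held. It performs no TLB invalidation itself; it marks every CPU in
 * tlb_flush_pending so that each one does a local flush the next time it
 * passes through check_and_switch_context().
 */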
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
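/*
 * The value built above (and stored in mm->context.id) combines the global
 * generation with the hardware ASID. The generation is a purely software
 * notion, used by check_and_switch_context() to detect that an mm's ASID
 * predates the last rollover.
 */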
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
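/*
 * Design note: the cmpxchg fast path above lets the common case (a switch
 * to an mm whose ASID belongs to the current generation) avoid taking
 * cpu_asid_lock entirely; the lock is only needed when a rollover has
 * happened or is racing with us, keeping ASID allocation off the hot path.
 */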
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}
static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);