Linux 5.7.7: arch/arm/mm/context.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
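
/*
 * The 64-bit mm->context.id extends the hardware ASID (the low ASID_BITS
 * bits, per the layout above) with a "generation" counter kept in the upper
 * bits. ASID_FIRST_VERSION therefore serves both as the size of the
 * per-generation ASID space and as the amount by which asid_generation is
 * advanced on rollover.
 */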

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
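
/*
 * active_asids tracks the ASID currently installed on each CPU (cleared to
 * zero across a rollover), reserved_asids remembers the ASID a CPU was
 * running when a rollover occurred, and tlb_flush_pending marks CPUs that
 * must flush their TLB before running a task in the new generation.
 */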

#ifdef CONFIG_ARM_ERRATA_798181
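/*
 * Build the mask of CPUs whose active (or reserved) ASID matches this mm's
 * context ID, so that the Cortex-A15 erratum 798181 workaround can target
 * its TLB maintenance IPIs at just those CPUs.
 */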
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
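	/*
	 * Merge the PID into the upper bits of CONTEXTIDR while preserving
	 * the hardware ASID held in the low ASID_BITS bits.
	 */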
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
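		/*
		 * Atomically read and zero each CPU's active ASID: the zero
		 * forces that CPU's next check_and_switch_context() through
		 * the slow path, where it picks up the new generation.
		 */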
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;
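	/*
	 * Start from an empty mm_cpumask: only CPUs that actually run this
	 * mm with its new ASID need to be considered for later TLB
	 * maintenance on it.
	 */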
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();
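
	/*
	 * Fast path: if this mm's ASID is from the current generation and
	 * the xchg below finds a non-zero value in this CPU's active_asids
	 * slot (i.e. no concurrent rollover has cleared it), we can switch
	 * without taking cpu_asid_lock.
	 */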
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
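	/* Install the mm's page tables; the hardware ASID is programmed alongside. */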
	cpu_switch_mm(mm->pgd, mm);
}