/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION      (1ULL << ASID_BITS)
#define NUM_USER_ASIDS          ASID_FIRST_VERSION
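
/*
 * The 64-bit mm->context.id holds a rollover "generation" in the bits
 * above ASID_BITS and the hardware ASID in the low ASID_BITS. Bumping
 * asid_generation invalidates every ID handed out under an older
 * generation without having to touch each mm individually.
 */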
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
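
/*
 * active_asids holds the ASID currently installed on each CPU (zeroed at
 * rollover); reserved_asids remembers that value across a rollover so a
 * task which keeps running without rescheduling keeps a valid ASID;
 * tlb_flush_pending marks CPUs that still owe a local TLB flush.
 */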
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
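
/*
 * Workaround for Cortex-A15 erratum 798181, where broadcast TLB
 * maintenance may not be observed by other CPUs: build the mask of
 * online CPUs that may still be using the ASID being invalidated, so
 * that the caller can target its shootdown IPIs at them.
 */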
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                             cpumask_t *mask)
{
        int cpu;
        unsigned long flags;
        u64 context_id, asid;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        context_id = mm->context.id.counter;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We only need to send an IPI if the other CPUs are
                 * running the same ASID as the one being invalidated.
                 */
                asid = per_cpu(active_asids, cpu).counter;
                if (asid == 0)
                        asid = per_cpu(reserved_asids, cpu);
                if (context_id == asid)
                        cpumask_set_cpu(cpu, mask);
        }
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif
#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
        u32 ttb;
        /*
         * Copy TTBR1 into TTBR0.
         * This points at swapper_pg_dir, which contains only global
         * entries so any speculative walks are perfectly safe.
         */
        asm volatile(
        "       mrc     p15, 0, %0, c2, c0, 1           @ read TTBR1\n"
        "       mcr     p15, 0, %0, c2, c0, 0           @ set TTBR0\n"
        : "=r" (ttb));
        isb();
}
#endif
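
/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is written
 * into the PROCID field of CONTEXTIDR on every context switch, so that
 * external debuggers and trace hardware can identify the running process.
 */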
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
                               void *t)
{
        u32 contextidr;
        pid_t pid;
        struct thread_info *thread = t;

        if (cmd != THREAD_NOTIFY_SWITCH)
                return NOTIFY_DONE;

        pid = task_pid_nr(thread->task) << ASID_BITS;
        asm volatile(
        "       mrc     p15, 0, %0, c13, c0, 1\n"
        "       and     %0, %0, %2\n"
        "       orr     %0, %0, %1\n"
        "       mcr     p15, 0, %0, c13, c0, 1\n"
        : "=r" (contextidr), "+r" (pid)
        : "I" (~ASID_MASK));
        isb();

        return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
        .notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
        return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
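
/*
 * Called with cpu_asid_lock held when the ASID space is exhausted: rebuild
 * asid_map from the ASIDs still live on each CPU and queue a full TLB
 * flush on every CPU before IDs from the new generation are handed out.
 */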
static void flush_context(unsigned int cpu)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
        for_each_possible_cpu(i) {
                asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = per_cpu(reserved_asids, i);
                __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }

        /* Queue a TLB invalidate and flush the I-cache if necessary. */
        cpumask_setall(&tlb_flush_pending);

        if (icache_is_vivt_asid_tagged())
                __flush_icache_all();
}
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(reserved_asids, cpu) = newasid;
                }
        }

        return hit;
}
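
/*
 * Allocate a context ID in the current generation for @mm, reusing its old
 * hardware ASID when possible. Called with cpu_asid_lock held; if the ASID
 * space is exhausted, the generation is bumped, the map is recycled via
 * flush_context() and the search is retried.
 */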
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK);

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes.
         * We always count from ASID #1, as we reserve ASID #0 to switch
         * via TTBR0 and to avoid speculative page table walks from hitting
         * in any partial walk caches, which could be populated from
         * overlapping level-1 descriptors used to map both the module
         * area and the userspace stack.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid == NUM_USER_ASIDS) {
                generation = atomic64_add_return(ASID_FIRST_VERSION,
                                                 &asid_generation);
                flush_context(cpu);
                asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
        }

        __set_bit(asid, asid_map);
        cur_idx = asid;
        cpumask_clear(mm_cpumask(mm));
        return asid | generation;
}
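
/*
 * Fast path: if this mm's context ID is still in the current generation
 * and the xchg into active_asids does not observe a concurrent rollover
 * (which zeroes the per-CPU entry), switch immediately. Otherwise take
 * cpu_asid_lock, allocate a new context if needed and perform any TLB
 * flush queued by a rollover before switching translation tables.
 */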
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();
        u64 asid;

        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                __check_vmalloc_seq(mm);

        /*
         * We cannot update the pgd and the ASID atomically with classic
         * MMU, so switch exclusively to global mappings to avoid
         * speculative page table walking with the wrong TTBR.
         */
        cpu_set_reserved_ttbr0();

        asid = atomic64_read(&mm->context.id);
        if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
            && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
                asid = new_context(mm, cpu);
                atomic64_set(&mm->context.id, asid);
        }

        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
        }

        atomic64_set(&per_cpu(active_asids, cpu), asid);
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
        cpu_switch_mm(mm->pgd, mm);
}