/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non 64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
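
/*
 * Worked example of the mapping above, assuming ASID_BITS == 8 (the
 * value used by this generation of the ARM code): hardware ASIDs run
 * from 1 to 255 and map to bitmap indices 0 to 254. ASID 0 is never
 * handed to a user mm; it is kept for the reserved (global) page
 * tables, which is why only ASID_FIRST_VERSION - 1 user ASIDs exist
 * per generation.
 */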
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
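
/*
 * Rollover bookkeeping: active_asids is the ASID currently installed
 * on each CPU and is atomically zeroed at rollover; reserved_asids
 * snapshots the ASIDs that were live at the last rollover so their
 * owners can keep using them; tlb_flush_pending marks CPUs that must
 * invalidate their local TLB before running with an ASID from the
 * new generation.
 */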
DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	/*
	 * Write the PID into the top bits of CONTEXTIDR while
	 * preserving the hardware ASID in the bottom bits.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
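
/*
 * Called with cpu_asid_lock held once the allocator has run out of
 * ASIDs in the current generation. It snapshots the ASID live on
 * every other CPU into reserved_asids, rebuilds the allocation
 * bitmap from those snapshots, and queues the TLB invalidates that
 * make the recycled ASIDs safe to reuse.
 */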
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			__set_bit(ASID_TO_IDX(asid), asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
static int is_reserved_asid(u64 asid)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes.
		 */
		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		}
		__set_bit(asid, asid_map);
		asid = generation | IDX_TO_ASID(asid);
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}
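
/*
 * The 64-bit mm->context.id produced above is generation bits stacked
 * on top of the hardware ASID. For example, assuming ASID_BITS == 8:
 * in the first generation (asid_generation == 0x100), bitmap index 5
 * gives IDX_TO_ASID(5) == 6 and a context.id of 0x106; after one
 * rollover the same index would give 0x206.
 */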
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();
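
	/*
	 * Fast path: if this mm's ASID is from the current generation
	 * and the xchg into active_asids does not return zero (i.e. no
	 * rollover has zeroed this CPU's slot in the meantime), the
	 * ASID is still valid and we can switch without taking the
	 * lock or doing any TLB maintenance.
	 */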
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
		dummy_flush_tlb_a15_erratum();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}