#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state, use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector, the target CPUs look into
 *	the right per-cpu variable for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future, when interrupts are split into per-CPU domains, this could
 *	be fixed, at the cost of triggering multiple IPIs in some cases.
 */
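/*
 * For example, with NUM_INVALIDATE_TLB_VECTORS == 8 a sender picks its
 * slot as smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS (see
 * native_flush_tlb_others() below), so on a 16-CPU machine CPUs 2 and
 * 10 share a vector and serialize on that slot's tlbstate_lock.
 */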
union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;
/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);
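/*
 * The pad member in the union above rounds each entry up to
 * SMP_CACHE_BYTES, so no two slots of flush_state ever share a cache
 * line even though remote CPUs write flush_cpumask on completion.
 */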
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
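/*
 * leave_mm() is called from the flush IPI and from do_flush_tlb_all(),
 * and only while this cpu holds the mm lazily (mmu_state != TLBSTATE_OK,
 * enforced by the BUG() above): loading swapper_pg_dir and clearing our
 * bit in cpu_vm_mask stops further flush ipis for that mm being sent to
 * this cpu.
 */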
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 *
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	tlb flushes.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there
 * are no write/read ordering problems.
 */
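/*
 * As a sketch, the 1a) steps above correspond roughly to what
 * switch_mm() does on a real mm change (pseudocode; the actual
 * implementation lives in the mmu_context headers):
 *
 *	cpu_clear(cpu, prev->cpu_vm_mask);	<- 1a1: stop flush ipis
 *	write_pda(mmu_state, TLBSTATE_OK);	<- 1a2: leave lazy mode
 *	write_pda(active_mm, next);		<- 1a3: accept new mm's flushes
 *	cpu_set(cpu, next->cpu_vm_mask);	<- 1a4: other cpus resume ipis
 *	load_cr3(next->pgd);
 */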
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data:
	 * a sender that picked slot N raised vector
	 * INVALIDATE_TLB_VECTOR_START + N, so undoing the negation
	 * and subtracting the base recovers N.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
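/*
 * The cpu_clear() above doubles as the completion handshake: the sender
 * spins in native_flush_tlb_others() until f->flush_cpumask drains to
 * empty, so no separate acknowledgement structure is needed.
 */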
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
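/*
 * Design note: senders hashed to the same slot serialize on
 * tlbstate_lock, and the busy-wait on flush_cpumask keeps flush_mm and
 * flush_va stable until every target has consumed them; that is why the
 * fields can only be cleared once the mask is empty.
 */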
static int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_possible_cpu(i)
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);

	return 0;
}
core_initcall(init_smp_flush);
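/*
 * The flush_tlb_* entry points below share one pattern: with preemption
 * disabled, snapshot mm->cpu_vm_mask, drop the local cpu from the copy,
 * flush the local tlb directly (or leave_mm() if the task has no user
 * mm), and pass the remaining cpus to flush_tlb_others().
 */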
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}