/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
#ifdef CONFIG_ARM_LPAE
#define cpu_set_asid(asid) {						\
	unsigned long ttbl, ttbh;					\
	asm volatile(							\
	"	mrrc	p15, 0, %0, %1, c2		@ read TTBR0\n"	\
	"	mov	%1, %2, lsl #(48 - 32)		@ set ASID\n"	\
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"	\
	: "=&r" (ttbl), "=&r" (ttbh)					\
	: "r" (asid & ~ASID_MASK));					\
}
#else
#define cpu_set_asid(asid) \
	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
#endif
/*
 * We fork()ed a process, and we need a new context for the child
 * to run in. We reserve version 0 for initial tasks so we will
 * always allocate an ASID. The ASID 0 is reserved for the TTBR
 * register changing sequence.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	spin_lock_init(&mm->context.id_lock);
}
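
/*
 * Worked example (added for illustration; assumes the usual
 * ASID_BITS == 8, with ASID_FIRST_VERSION == 1 << ASID_BITS): an
 * mm->context.id of 0x203 carries rollover generation 2 in the bits
 * above ASID_BITS and hardware ASID 0x03 in the bits below. The
 * generation test used throughout this file then reads:
 */
static inline int asid_version_is_current(unsigned int id)
{
	/* the XOR clears the bits the two values share; anything left */
	/* above ASID_BITS means id is from an older generation */
	return ((id ^ cpu_last_asid) >> ASID_BITS) == 0;
}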
static void flush_context(void)
{
	/* set the reserved ASID before flushing the TLB */
	cpu_set_asid(0);
	isb();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}
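
/*
 * Note (added for illustration): this is the reserved ASID from the
 * comment above __init_new_context() in action. While the old TLB
 * entries are being invalidated, the CPU temporarily runs on ASID 0,
 * which is never handed out to an mm, so translations fetched during
 * the switch cannot be tagged with an ASID that a live mm owns.
 */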
#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
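
/*
 * Concrete scenario (added for illustration): two CPUs running
 * threads of the same process are IPI'd during a rollover while
 * cpu_last_asid == 0x200; CPU0 offers asid 0x201 and CPU1 offers
 * 0x202. Whichever takes id_lock first installs its value and clears
 * mm_cpumask(mm); the loser then sees (id ^ cpu_last_asid) >>
 * ASID_BITS == 0 and skips the store, so the mm keeps one consistent
 * ASID.
 */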
/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (mm == NULL)
		return;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_set_asid(mm->context.id);
	isb();
}
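
/*
 * Example rollover (added for illustration; assumes NR_CPUS == 4 and
 * ASID_BITS == 8): the overflowing CPU bumps cpu_last_asid to 0x200
 * and each CPU picks cpu_last_asid + cpu + 1, i.e. 0x201..0x204 for
 * its current_mm. cpu_last_asid then advances by NR_CPUS to 0x204,
 * so the next __new_context() call hands out 0x205.
 */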
#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif
void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */

	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	spin_unlock(&cpu_asid_lock);
}
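
/*
 * Caller sketch (added for illustration; the real entry point lives
 * in asm/mmu_context.h and may differ in detail): a context switch
 * only drops into the slow path above when the mm's ASID generation
 * is stale, roughly:
 */
#if 0
static inline void check_context_sketch(struct mm_struct *mm)
{
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);
}
#endif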