/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
20 static DEFINE_RAW_SPINLOCK(cpu_asid_lock
);
21 unsigned int cpu_last_asid
= ASID_FIRST_VERSION
;
23 #ifdef CONFIG_ARM_LPAE
24 void cpu_set_reserved_ttbr0(void)
26 unsigned long ttbl
= __pa(swapper_pg_dir
);
27 unsigned long ttbh
= 0;
30 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
34 " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n"
36 : "r" (ttbl
), "r" (ttbh
));
40 void cpu_set_reserved_ttbr0(void)
43 /* Copy TTBR1 into TTBR0 */
45 " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
46 " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"
#ifdef CONFIG_PID_IN_CONTEXTIDR
/*
 * On a thread switch, mirror the incoming task's PID into the PROCID field
 * of CONTEXTIDR (bits above the ASID) so that external debug/trace tools
 * can identify the running process.
 */
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	/* Read CONTEXTIDR, keep the ASID field, insert the new PID. */
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
88 * We fork()ed a process, and we need a new context for the child
91 void __init_new_context(struct task_struct
*tsk
, struct mm_struct
*mm
)
94 raw_spin_lock_init(&mm
->context
.id_lock
);
/*
 * Called with cpu_asid_lock held on ASID rollover: park TTBR0 on the
 * reserved (global-only) tables and invalidate the local TLB so stale
 * translations from the previous ASID generation cannot be used.
 */
static void flush_context(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	/* VIVT ASID-tagged I-caches also hold per-ASID entries. */
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}
109 static void set_mm_context(struct mm_struct
*mm
, unsigned int asid
)
114 * Locking needed for multi-threaded applications where the
115 * same mm->context.id could be set from different CPUs during
116 * the broadcast. This function is also called via IPI so the
117 * mm->context.id_lock has to be IRQ-safe.
119 raw_spin_lock_irqsave(&mm
->context
.id_lock
, flags
);
120 if (likely((mm
->context
.id
^ cpu_last_asid
) >> ASID_BITS
)) {
122 * Old version of ASID found. Set the new one and
123 * reset mm_cpumask(mm).
125 mm
->context
.id
= asid
;
126 cpumask_clear(mm_cpumask(mm
));
128 raw_spin_unlock_irqrestore(&mm
->context
.id_lock
, flags
);
131 * Set the mm_cpumask(mm) bit for the current CPU.
133 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm
));
137 * Reset the ASID on the current CPU. This function call is broadcast
138 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
140 static void reset_context(void *info
)
143 unsigned int cpu
= smp_processor_id();
144 struct mm_struct
*mm
= current
->active_mm
;
147 asid
= cpu_last_asid
+ cpu
+ 1;
150 set_mm_context(mm
, asid
);
152 /* set the new ASID */
153 cpu_switch_mm(mm
->pgd
, mm
);
158 static inline void set_mm_context(struct mm_struct
*mm
, unsigned int asid
)
160 mm
->context
.id
= asid
;
161 cpumask_copy(mm_cpumask(mm
), cpumask_of(smp_processor_id()));
166 void __new_context(struct mm_struct
*mm
)
170 raw_spin_lock(&cpu_asid_lock
);
173 * Check the ASID again, in case the change was broadcast from
174 * another CPU before we acquired the lock.
176 if (unlikely(((mm
->context
.id
^ cpu_last_asid
) >> ASID_BITS
) == 0)) {
177 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm
));
178 raw_spin_unlock(&cpu_asid_lock
);
183 * At this point, it is guaranteed that the current mm (with
184 * an old ASID) isn't active on any other CPU since the ASIDs
185 * are changed simultaneously via IPI.
187 asid
= ++cpu_last_asid
;
189 asid
= cpu_last_asid
= ASID_FIRST_VERSION
;
192 * If we've used up all our ASIDs, we need
193 * to start a new version and flush the TLB.
195 if (unlikely((asid
& ~ASID_MASK
) == 0)) {
196 asid
= cpu_last_asid
+ smp_processor_id() + 1;
200 smp_call_function(reset_context
, NULL
, 1);
202 cpu_last_asid
+= NR_CPUS
;
205 set_mm_context(mm
, asid
);
206 raw_spin_unlock(&cpu_asid_lock
);