/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
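
/*
 * The lower MAX_ASID_BITS bits of mm->context.id hold the ASID; the
 * upper bits act as a generation counter so that a stale ASID can be
 * detected after rollover. cpu_last_asid is the most recently allocated
 * value (see the generation check in check_and_switch_context() below).
 */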
#define MAX_ASID_BITS   16

extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = page_to_phys(empty_zero_page);

        asm(
        "       msr     ttbr0_el1, %0                   // set TTBR0\n"
        "       isb"
        :
        : "r" (ttbr));
}
static inline void switch_new_context(struct mm_struct *mm)
{
        unsigned long flags;

        __new_context(mm);

        local_irq_save(flags);
        cpu_switch_mm(mm->pgd, mm);
        local_irq_restore(flags);
}
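
/*
 * Called on every mm switch. Either the mm already has an ASID from the
 * current generation (fast path), or a new ASID must be allocated: the
 * allocation is deferred via TIF_SWITCH_MM when interrupts are disabled,
 * and done immediately through switch_new_context() otherwise.
 */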
static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
{
        /*
         * Required during context switch to avoid speculative page table
         * walking with the wrong TTBR.
         */
        cpu_set_reserved_ttbr0();

        if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
                /*
                 * The ASID is from the current generation, just switch to the
                 * new pgd. This condition is only true for calls from
                 * context_switch() and interrupts are already disabled.
                 */
                cpu_switch_mm(mm->pgd, mm);
        else if (irqs_disabled())
                /*
                 * Defer the new ASID allocation until after the context
                 * switch critical region since __new_context() cannot be
                 * called with interrupts disabled.
                 */
                set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
        else
                /*
                 * That is a direct call to switch_mm() or activate_mm() with
                 * interrupts enabled and a new context.
                 */
                switch_new_context(mm);
}

#define init_new_context(tsk,mm)        (__init_new_context(tsk,mm),0)
#define destroy_context(mm)             do { } while(0)
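
/*
 * Called from the scheduler once the context switch critical region is
 * over, with interrupts enabled again; this is where an ASID allocation
 * deferred via TIF_SWITCH_MM in check_and_switch_context() is performed.
 */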
#define finish_arch_post_lock_switch \
        finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
                struct mm_struct *mm = current->mm;
                unsigned long flags;

                __new_context(mm);

                local_irq_save(flags);
                cpu_switch_mm(mm->pgd, mm);
                local_irq_restore(flags);
        }
}

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();
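
        /*
         * "next" has been running on other CPUs but never on this one:
         * a thread is migrating here, so flush the I-cache before
         * executing its code on this CPU.
         */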
#ifdef CONFIG_SMP
        /* check for possible thread migration */
        if (!cpumask_empty(mm_cpumask(next)) &&
            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                __flush_icache_all();
#endif
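        /*
         * Mark this CPU as having run "next"; switch the context if it
         * has never run here before or if the mm actually changed.
         */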
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                check_and_switch_context(next, tsk);
}
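
/*
 * activate_mm() installs a new address space outside a regular context
 * switch (for example at exec time) and simply goes through switch_mm()
 * with no task pointer; deactivate_mm() has nothing to do here.
 */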
#define deactivate_mm(tsk,mm)   do { } while (0)
#define activate_mm(prev,next)  switch_mm(prev, next, NULL)

#endif