#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};
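/*
 * Install @mm's LDT on the current CPU, or clear the LDT descriptor if
 * the mm has no LDT.
 */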
static inline void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();

	DEBUG_LOCKS_WARN_ON(preemptible());
}
/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
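/*
 * Called when the scheduler switches to a kernel thread: CR3 keeps
 * pointing at the old mm's page tables, and marking this CPU
 * TLBSTATE_LAZY lets the flush-IPI handler call leave_mm() and drop
 * the CPU from the mm's cpumask instead of flushing it repeatedly.
 */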
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
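/*
 * Switch this CPU from 'prev' to 'next': update cpu_tlbstate and
 * mm_cpumask, load CR3 with next's page tables, and reload the LDT if
 * it differs.  The prev == next path re-activates an mm on a CPU that
 * went lazy and had flush IPIs disabled by leave_mm().
 */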
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));
		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);
		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load the LDT, if the LDT is different: */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			load_mm_ldt(next);
		}
	}
#endif
}
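/*
 * activate_mm() is the variant of switch_mm() used when the current
 * task installs a fresh mm (e.g. at exec time), so no task switch is
 * involved (hence the NULL tsk); it also gives paravirt a hook.
 * deactivate_mm() clears the user segment registers so no stale
 * %fs/%gs state is kept around.
 */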
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
#endif /* _ASM_X86_MMU_CONTEXT_H */