#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

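/*
 * CR4.PCE gates user-space RDPMC.  load_mm_cr4() below runs on context
 * switch so the bit tracks the incoming mm: set when rdpmc is globally
 * allowed or this mm has been granted perf_rdpmc_allowed, else cleared.
 */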
#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */
	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}
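/*
 * Illustration (a sketch, not part of this header): the writer side in
 * arch/x86/kernel/ldt.c pairs with the lockless_dereference() above
 * roughly as:
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *
 * The release store publishes a fully-initialized ldt_struct before the
 * pointer becomes visible; the IPI then reloads the LDT on every CPU
 * that may be running with the old one.
 */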
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
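/*
 * Lazy TLB: a CPU entering a kernel thread keeps the previous mm's page
 * tables loaded instead of switching.  Marking the state TLBSTATE_LAZY
 * lets the first remote flush IPI for that mm detach this CPU (see
 * leave_mm()) rather than servicing every subsequent flush.
 */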
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	init_new_context_ldt(tsk, mm);
	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
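/*
 * Defining the name to itself tells generic code, which tests
 * "#ifndef switch_mm_irqs_off", that x86 supplies a real irqs-off
 * variant instead of the generic fallback wrapper around switch_mm().
 */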
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
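/*
 * deactivate_mm() zeroes the user segment registers (%gs on 32-bit,
 * %gs and %fs on 64-bit) so no stale selector can reference an LDT
 * that goes away with the old mm; it is reached via mm_release() when
 * a task gives up its mm (exec/exit paths).
 */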
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}
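/*
 * is_64bit_mm() below distinguishes native 64-bit mms from 32-bit
 * compat ones: without CONFIG_IA32_EMULATION every mm is 64-bit;
 * otherwise compat mms are flagged via context.ia32_compat == TIF_IA32.
 */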
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}
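/*
 * cpu_feature_enabled() folds to a compile-time zero when MPX is
 * configured out and otherwise patches to a static branch, so on
 * MPX-less systems the check above costs roughly one untaken jump.
 */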
static inline int vma_pkey(struct vm_area_struct *vma)
{
	u16 pkey = 0;
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	pkey = (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
#endif
	return pkey;
}
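/*
 * Worked example: a VMA whose vm_flags carry VM_PKEY_BIT3 and
 * VM_PKEY_BIT1 encodes pkey 0b1010 == 10; the four VM_PKEY_BITx flags
 * occupy bits VM_PKEY_SHIFT..VM_PKEY_SHIFT+3 and encode keys 0-15.
 */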
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}
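/*
 * PKRU keeps two bits per key: access-disable at bit 2*pkey and
 * write-disable at bit 2*pkey + 1.  E.g. pkru == 0x8 (WD for key 1)
 * passes the read check but fails the write check for pkey 1.
 */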
/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
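/*
 * "foreign" is set by callers such as get_user_pages_remote() that act
 * on another process's mm; combined with vma_is_foreign() this limits
 * pkey enforcement to accesses within the current mm.
 */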
static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write);
}
#endif /* _ASM_X86_MMU_CONTEXT_H */