/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H
#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
extern atomic64_t last_mm_ctx_id;
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT_XXL */
#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can allocate
	 * and map, and enable a new LDT without invalidating the mapping
	 * of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};
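/*
 * Illustrative sketch, not part of the kernel API: with two alias slots,
 * a new LDT is mapped into whichever slot the still-live LDT does not
 * occupy, so the old mapping stays usable until the switch completes.
 * The helper name below is hypothetical.
 */
static inline int ldt_sketch_pick_free_slot(int old_slot)
{
	/* old_slot is 0, 1, or -1 (no alias mapping yet) */
	return old_slot == 0 ? 1 : 0;
}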
/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
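/*
 * Illustrative sketch, not part of the kernel API: ctx_id is unique for
 * the life of an mm and never reused, while tlb_gen advances on page
 * table updates. Together they let TLB code decide whether a CPU's
 * cached translations for this mm are current; a hypothetical check:
 */
static inline bool sketch_tlb_state_is_current(u64 seen_ctx_id,
					       u64 seen_tlb_gen,
					       struct mm_struct *mm)
{
	return seen_ctx_id == mm->context.ctx_id &&
	       seen_tlb_gen == atomic64_read(&mm->context.tlb_gen);
}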
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.flags & MM_CONTEXT_UPROBE_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif
static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}
/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
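/*
 * Illustrative sketch, not part of the kernel API: PKRU gives each pkey
 * two bits, Access-Disable (bit 2*pkey) and Write-Disable (bit 2*pkey + 1).
 * A check like __pkru_allows_pkey() conceptually reduces to testing those
 * bits; a hypothetical standalone version:
 */
static inline bool sketch_pkru_allows(u32 pkru, int pkey, bool write)
{
	if (pkru & (1u << (pkey * 2)))			/* Access-Disable */
		return false;
	if (write && (pkru & (1u << (pkey * 2 + 1))))	/* Write-Disable */
		return false;
	return true;
}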
unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */