// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/gcs.h>
#include <asm/page.h>
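
/*
 * Map a new GCS region. The mapping is created PROT_READ with
 * VM_SHADOW_STACK | VM_WRITE so it can only be written through GCS
 * operations, not ordinary stores. When a fixed address is requested,
 * refuse to clobber any existing mapping.
 */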
static unsigned long alloc_gcs(unsigned long addr, unsigned long size)
{
        int flags = MAP_ANONYMOUS | MAP_PRIVATE;
        struct mm_struct *mm = current->mm;
        unsigned long mapped_addr, unused;

        if (addr)
                flags |= MAP_FIXED_NOREPLACE;

        mmap_write_lock(mm);
        mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags,
                              VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL);
        mmap_write_unlock(mm);

        return mapped_addr;
}
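
/*
 * Pick the size for a new GCS: use the caller supplied size if there is
 * one, otherwise default to half of RLIMIT_STACK clamped to the range
 * PAGE_SIZE..2G.
 */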
static unsigned long gcs_size(unsigned long size)
{
        if (size)
                return PAGE_ALIGN(size);

        /* Allocate RLIMIT_STACK/2 with limits of PAGE_SIZE..2G */
        size = PAGE_ALIGN(min_t(unsigned long long,
                                rlimit(RLIMIT_STACK) / 2, SZ_2G));
        return max(PAGE_SIZE, size);
}
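
/*
 * Allocate a GCS for a newly cloned thread. fork() and vfork() children
 * keep using the GCS at the current GCSPR_EL0, while threads sharing the
 * mm (CLONE_VM) get a freshly allocated GCS sized from the requested
 * stack size.
 */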
unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
                                     const struct kernel_clone_args *args)
{
        unsigned long addr, size;

        if (!system_supports_gcs())
                return 0;

        if (!task_gcs_el0_enabled(tsk))
                return 0;

        if ((args->flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM) {
                tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
                return 0;
        }

        size = args->stack_size / 2;

        size = gcs_size(size);
        addr = alloc_gcs(0, size);
        if (IS_ERR_VALUE(addr))
                return addr;

        tsk->thread.gcs_base = addr;
        tsk->thread.gcs_size = size;
        tsk->thread.gcspr_el0 = addr + size - sizeof(u64);

        return addr;
}
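
/*
 * The map_shadow_stack() syscall: allocate a new GCS for userspace and,
 * if requested, seed it with an end of stack marker and a cap token so
 * that userspace can later switch onto the new stack.
 */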
SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
{
        unsigned long alloc_size;
        unsigned long __user *cap_ptr;
        unsigned long cap_val;
        int ret = 0;
        int cap_offset;

        if (!system_supports_gcs())
                return -EOPNOTSUPP;

        if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER))
                return -EINVAL;

        if (!PAGE_ALIGNED(addr))
                return -EINVAL;

        if (size == 8 || !IS_ALIGNED(size, 8))
                return -EINVAL;

        /*
         * An overflow would result in attempting to write the restore token
         * to the wrong location. Not catastrophic, but just return the right
         * error code and block it.
         */
        alloc_size = PAGE_ALIGN(size);
        if (alloc_size < size)
                return -EOVERFLOW;

        addr = alloc_gcs(addr, alloc_size);
        if (IS_ERR_VALUE(addr))
                return addr;

        /*
         * Put a cap token at the end of the allocated region so it
         * can be switched to.
         */
        if (flags & SHADOW_STACK_SET_TOKEN) {
                /* Leave an extra empty frame as a top of stack marker? */
                if (flags & SHADOW_STACK_SET_MARKER)
                        cap_offset = 2;
                else
                        cap_offset = 1;

                cap_ptr = (unsigned long __user *)(addr + size -
                                                   (cap_offset * sizeof(unsigned long)));
                cap_val = GCS_CAP(cap_ptr);

                put_user_gcs(cap_val, cap_ptr, &ret);
                if (ret != 0) {
                        vm_munmap(addr, size);
                        return -EFAULT;
                }

                /*
                 * Ensure the new cap is ordered before standard
                 * memory accesses to the same location.
                 */
                gcsb_dsync();
        }

        return addr;
}

/*
 * Apply the GCS mode configured for the specified task to the
 * hardware.
 */
void gcs_set_el0_mode(struct task_struct *task)
{
        u64 gcscre0_el1 = GCSCRE0_EL1_nTR;

        if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)
                gcscre0_el1 |= GCSCRE0_EL1_RVCHKEN | GCSCRE0_EL1_PCRSEL;

        if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE)
                gcscre0_el1 |= GCSCRE0_EL1_STREn;

        if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH)
                gcscre0_el1 |= GCSCRE0_EL1_PUSHMEn;

        write_sysreg_s(gcscre0_el1, SYS_GCSCRE0_EL1);
}
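
/*
 * Release the kernel allocated GCS for an exiting task and clear the
 * thread's GCS bookkeeping.
 */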
void gcs_free(struct task_struct *task)
{
        if (!system_supports_gcs())
                return;

        /*
         * When fork() with CLONE_VM fails, the child (tsk) already
         * has a GCS allocated, and exit_thread() calls this function
         * to free it. In this case the parent (current) and the
         * child share the same mm struct.
         */
        if (!task->mm || task->mm != current->mm)
                return;

        if (task->thread.gcs_base)
                vm_munmap(task->thread.gcs_base, task->thread.gcs_size);

        task->thread.gcspr_el0 = 0;
        task->thread.gcs_base = 0;
        task->thread.gcs_size = 0;
}
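
/*
 * Update the task's GCS mode in response to PR_SET_SHADOW_STACK_STATUS,
 * allocating a GCS the first time GCS is enabled and applying the new
 * mode to the hardware when the task is current.
 */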
int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg)
{
        unsigned long gcs, size;
        int ret;

        if (!system_supports_gcs())
                return -EINVAL;

        if (is_compat_thread(task_thread_info(task)))
                return -EINVAL;

        /* Reject unknown flags */
        if (arg & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
                return -EINVAL;

        ret = gcs_check_locked(task, arg);
        if (ret != 0)
                return ret;

        /* If we are enabling GCS then make sure we have a stack */
        if (arg & PR_SHADOW_STACK_ENABLE &&
            !task_gcs_el0_enabled(task)) {
                /* Do not allow GCS to be reenabled */
                if (task->thread.gcs_base || task->thread.gcspr_el0)
                        return -EINVAL;

                if (task != current)
                        return -EBUSY;

                size = gcs_size(0);
                gcs = alloc_gcs(0, size);
                if (IS_ERR_VALUE(gcs))
                        return -ENOMEM;

                task->thread.gcspr_el0 = gcs + size - sizeof(u64);
                task->thread.gcs_base = gcs;
                task->thread.gcs_size = size;
                if (task == current)
                        write_sysreg_s(task->thread.gcspr_el0,
                                       SYS_GCSPR_EL0);
        }

        task->thread.gcs_el0_mode = arg;
        if (task == current)
                gcs_set_el0_mode(task);

        return 0;
}
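
/* Report the task's current GCS mode for PR_GET_SHADOW_STACK_STATUS. */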
int arch_get_shadow_stack_status(struct task_struct *task,
                                 unsigned long __user *arg)
{
        if (!system_supports_gcs())
                return -EINVAL;

        if (is_compat_thread(task_thread_info(task)))
                return -EINVAL;

        return put_user(task->thread.gcs_el0_mode, arg);
}
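
/*
 * Lock the requested GCS mode bits so that further changes to them via
 * PR_SET_SHADOW_STACK_STATUS will be rejected.
 */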
int arch_lock_shadow_stack_status(struct task_struct *task,
                                  unsigned long arg)
{
        if (!system_supports_gcs())
                return -EINVAL;

        if (is_compat_thread(task_thread_info(task)))
                return -EINVAL;

        /*
         * We support locking unknown bits so applications can prevent
         * any changes in a future proof manner.
         */
        task->thread.gcs_el0_locked |= arg;

        return 0;
}