arch/arm64/mm/gcs.c

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/gcs.h>
#include <asm/page.h>
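
/*
 * Map anonymous memory for use as a Guarded Control Stack. A GCS page
 * is only PROT_READ as far as ordinary loads and stores are concerned;
 * tagging the VMA with VM_SHADOW_STACK | VM_WRITE makes it writable
 * solely through GCS operations (and GCSSTR where that is enabled).
 */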
static unsigned long alloc_gcs(unsigned long addr, unsigned long size)
{
	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
	struct mm_struct *mm = current->mm;
	unsigned long mapped_addr, unused;

	if (addr)
		flags |= MAP_FIXED_NOREPLACE;

	mmap_write_lock(mm);
	mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags,
			      VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL);
	mmap_write_unlock(mm);

	return mapped_addr;
}
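
/*
 * Pick a GCS size. An explicit request is simply page aligned;
 * otherwise default to half of RLIMIT_STACK, clamped to the range
 * PAGE_SIZE..2G. Each GCS record is a single 8-byte return address,
 * so e.g. the common 8MiB RLIMIT_STACK yields a 4MiB GCS with room
 * for roughly half a million nested calls.
 */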
static unsigned long gcs_size(unsigned long size)
{
	if (size)
		return PAGE_ALIGN(size);

	/* Allocate RLIMIT_STACK/2 with limits of PAGE_SIZE..2G */
	size = PAGE_ALIGN(min_t(unsigned long long,
				rlimit(RLIMIT_STACK) / 2, SZ_2G));
	return max(PAGE_SIZE, size);
}
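
/*
 * Allocate a GCS for a thread being created via clone(). Only a new
 * thread that shares the mm (CLONE_VM without CLONE_VFORK) gets a
 * fresh stack: a plain fork() child inherits a copy of the parent's
 * GCS along with the rest of the address space, and a vfork() child
 * runs on the parent's, so both just carry the current GCSPR_EL0
 * across.
 */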
unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
				     const struct kernel_clone_args *args)
{
	unsigned long addr, size;

	if (!system_supports_gcs())
		return 0;

	if (!task_gcs_el0_enabled(tsk))
		return 0;

	if ((args->flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM) {
		tsk->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
		return 0;
	}

	size = args->stack_size / 2;

	size = gcs_size(size);
	addr = alloc_gcs(0, size);
	if (IS_ERR_VALUE(addr))
		return addr;

	tsk->thread.gcs_base = addr;
	tsk->thread.gcs_size = size;
	tsk->thread.gcspr_el0 = addr + size - sizeof(u64);

	return addr;
}
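
/*
 * map_shadow_stack() lets userspace allocate a GCS explicitly, for
 * example as the target of a later stack switch. An illustrative
 * (untested) call from userspace:
 *
 *	addr = syscall(__NR_map_shadow_stack, 0, size,
 *		       SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER);
 */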
SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
{
	unsigned long alloc_size;
	unsigned long __user *cap_ptr;
	unsigned long cap_val;
	int ret = 0;
	int cap_offset;

	if (!system_supports_gcs())
		return -EOPNOTSUPP;

	if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	if (size == 8 || !IS_ALIGNED(size, 8))
		return -EINVAL;

	/*
	 * An overflow would result in attempting to write the restore token
	 * to the wrong location. Not catastrophic, but just return the right
	 * error code and block it.
	 */
	alloc_size = PAGE_ALIGN(size);
	if (alloc_size < size)
		return -EOVERFLOW;

	addr = alloc_gcs(addr, alloc_size);
	if (IS_ERR_VALUE(addr))
		return addr;

	/*
	 * Put a cap token at the end of the allocated region so it
	 * can be switched to.
	 */
	if (flags & SHADOW_STACK_SET_TOKEN) {
		/* Leave an extra empty frame as a top of stack marker? */
		if (flags & SHADOW_STACK_SET_MARKER)
			cap_offset = 2;
		else
			cap_offset = 1;

		cap_ptr = (unsigned long __user *)(addr + size -
						   (cap_offset * sizeof(unsigned long)));
		cap_val = GCS_CAP(cap_ptr);

		put_user_gcs(cap_val, cap_ptr, &ret);
		if (ret != 0) {
			vm_munmap(addr, size);
			return -EFAULT;
		}

		/*
		 * Ensure the new cap is ordered before standard
		 * memory accesses to the same location.
		 */
		gcsb_dsync();
	}

	return addr;
}
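
/*
 * One GCSCRE0_EL1 bit per prctl() mode bit: PCRSEL/RVCHKEN turn on
 * guarded procedure returns and return value checking, STREn permits
 * the GCSSTR store instruction and PUSHMEn permits GCSPUSHM (per the
 * field names; the Arm architecture documentation is authoritative).
 */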
/*
 * Apply the GCS mode configured for the specified task to the
 * hardware.
 */
void gcs_set_el0_mode(struct task_struct *task)
{
	u64 gcscre0_el1 = GCSCRE0_EL1_nTR;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE)
		gcscre0_el1 |= GCSCRE0_EL1_RVCHKEN | GCSCRE0_EL1_PCRSEL;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_WRITE)
		gcscre0_el1 |= GCSCRE0_EL1_STREn;

	if (task->thread.gcs_el0_mode & PR_SHADOW_STACK_PUSH)
		gcscre0_el1 |= GCSCRE0_EL1_PUSHMEn;

	write_sysreg_s(gcscre0_el1, SYS_GCSCRE0_EL1);
}
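
/*
 * Free the kernel-allocated GCS of an exiting thread, provided it is
 * still reachable through this mm.
 */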
void gcs_free(struct task_struct *task)
{
	if (!system_supports_gcs())
		return;

	/*
	 * When fork() with CLONE_VM fails, the child (tsk) already
	 * has a GCS allocated, and exit_thread() calls this function
	 * to free it. In this case the parent (current) and the
	 * child share the same mm struct.
	 */
	if (!task->mm || task->mm != current->mm)
		return;

	if (task->thread.gcs_base)
		vm_munmap(task->thread.gcs_base, task->thread.gcs_size);

	task->thread.gcspr_el0 = 0;
	task->thread.gcs_base = 0;
	task->thread.gcs_size = 0;
}
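
/*
 * Backend for the PR_SET_SHADOW_STACK_STATUS prctl(). Enabling GCS for
 * the first time allocates a default-sized stack and points GCSPR_EL0
 * at its top. An illustrative (untested) call from userspace:
 *
 *	prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0);
 */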
int arch_set_shadow_stack_status(struct task_struct *task, unsigned long arg)
{
	unsigned long gcs, size;
	int ret;

	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	/* Reject unknown flags */
	if (arg & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
		return -EINVAL;

	ret = gcs_check_locked(task, arg);
	if (ret != 0)
		return ret;

	/* If we are enabling GCS then make sure we have a stack */
	if (arg & PR_SHADOW_STACK_ENABLE &&
	    !task_gcs_el0_enabled(task)) {
		/* Do not allow GCS to be reenabled */
		if (task->thread.gcs_base || task->thread.gcspr_el0)
			return -EINVAL;

		if (task != current)
			return -EBUSY;

		size = gcs_size(0);
		gcs = alloc_gcs(0, size);
		if (!gcs)
			return -ENOMEM;

		task->thread.gcspr_el0 = gcs + size - sizeof(u64);
		task->thread.gcs_base = gcs;
		task->thread.gcs_size = size;
		if (task == current)
			write_sysreg_s(task->thread.gcspr_el0,
				       SYS_GCSPR_EL0);
	}

	task->thread.gcs_el0_mode = arg;
	if (task == current)
		gcs_set_el0_mode(task);

	return 0;
}
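
/* Backend for the PR_GET_SHADOW_STACK_STATUS prctl(). */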
int arch_get_shadow_stack_status(struct task_struct *task,
				 unsigned long __user *arg)
{
	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	return put_user(task->thread.gcs_el0_mode, arg);
}
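
/*
 * Backend for the PR_LOCK_SHADOW_STACK_STATUS prctl(): bits locked
 * here can never be cleared again for this task, and locked mode bits
 * can no longer be changed (enforced by gcs_check_locked() in
 * arch_set_shadow_stack_status() above).
 */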
int arch_lock_shadow_stack_status(struct task_struct *task,
				  unsigned long arg)
{
	if (!system_supports_gcs())
		return -EINVAL;

	if (is_compat_thread(task_thread_info(task)))
		return -EINVAL;

	/*
	 * We support locking unknown bits so applications can prevent
	 * any changes in a future proof manner.
	 */
	task->thread.gcs_el0_locked |= arg;

	return 0;
}