/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define HAVE_GET_KERNEL_NOFAULT

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = TASK_SIZE_MAX - 1;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

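/*
 * For illustration only (not part of the original header): a plain C
 * sketch of the same 65-bit check, under the hypothetical name
 * __range_ok_ref(). Tagged-address untagging is ignored here. The asm
 * above computes this without a branch, keeping the hot path cheap.
 */
static inline unsigned long __range_ok_ref(unsigned long addr, unsigned long size)
{
	/* addr + size <= TASK_SIZE_MAX, evaluated without 64-bit overflow */
	return size <= TASK_SIZE_MAX && addr <= TASK_SIZE_MAX - size;
}
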
#define access_ok(addr, size)	__range_ok(addr, size)

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

/*
 * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0 affects
 * EL0 and TCF affects EL1 irrespective of which TTBR is used.
 * The kernel usually accesses TTBR0 with LDTR/STTR instructions when
 * UAO is available, so these act as EL0 accesses using TCF0. However,
 * the futex code uses exclusives, which are executed as EL1 accesses;
 * these can cause a tag check fault even if the user disables TCF0.
 *
 * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
 * and reset it in uaccess_disable(). The Tag Check Override (TCO) bit
 * temporarily disables tag checking, preventing the issue.
 */
static inline void __uaccess_disable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

static inline void __uaccess_enable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

static inline void uaccess_disable_privileged(void)
{
	__uaccess_disable_tco();

	if (uaccess_ttbr0_disable())
		return;

	__uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
	__uaccess_enable_tco();

	if (uaccess_ttbr0_enable())
		return;

	__uaccess_disable_hw_pan();
}

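/*
 * For illustration only (hypothetical helper, not part of this header):
 * the bracketing pattern intended for the privileged-access helpers
 * above, as used by callers such as the futex code. The window is
 * opened, the EL1 access performed, and the window closed again,
 * regardless of whether PAN is hardware or TTBR0-emulated. Fault
 * handling via the exception table is omitted for brevity.
 */
static inline unsigned long example_privileged_read(const unsigned long __user *uptr)
{
	unsigned long val;

	uaccess_enable_privileged();
	/* a plain (EL1) load, legal only inside the privileged window */
	val = *(__force const unsigned long *)uptr;
	uaccess_disable_privileged();
	return val;
}
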
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * maximum user address. In case the pointer is tagged (has the top
 * byte set), untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}

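/*
 * For illustration only (hypothetical helper): check a pointer, then
 * mask it. Even if a Spectre-v1 style misprediction skips past the
 * access_ok() branch, the masked pointer is NULL for out-of-range
 * inputs, so nothing attacker-controlled can be dereferenced.
 */
static inline const void __user *example_check_and_mask(const void __user *uptr,
							unsigned long size)
{
	if (!access_ok(uptr, size))
		return NULL;
	return uaccess_mask_ptr(uptr);
}
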
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(load, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_mem(ldr, x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __raw_get_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_ttbr0_enable();						\
	__raw_get_mem("ldtr", x, ptr, err);				\
	uaccess_ttbr0_disable();					\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user

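/*
 * For illustration only (hypothetical caller): typical get_user()
 * usage. The access_ok() check, pointer masking and TTBR0/PAN toggling
 * above are all hidden behind the single macro; the caller only tests
 * the 0 / -EFAULT return value.
 */
static inline int example_read_u32(const u32 __user *uptr, u32 *out)
{
	u32 val;

	if (get_user(val, uptr))
		return -EFAULT;
	*out = val;
	return 0;
}
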
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gkn_err = 0;						\
									\
	__raw_get_mem("ldr", *((type *)(dst)),				\
		      (__force type *)(src), __gkn_err);		\
	if (unlikely(__gkn_err))					\
		goto err_label;						\
} while (0)

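/*
 * For illustration only (hypothetical helper): __get_kernel_nofault()
 * branches to the supplied label if the load faults, in the style of
 * the generic copy_from_kernel_nofault().
 */
static inline long example_peek_kernel(const unsigned long *addr,
				       unsigned long *val)
{
	__get_kernel_nofault(val, addr, unsigned long, Efault);
	return 0;
Efault:
	return -EFAULT;
}
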
#define __put_mem_asm(store, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" store "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_mem(str, x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_mem_asm(str, "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_mem_asm(str, "%x", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

#define __raw_put_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_ttbr0_enable();						\
	__raw_put_mem("sttr", x, ptr, err);				\
	uaccess_ttbr0_disable();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user

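/*
 * For illustration only (hypothetical caller): put_user() mirrors
 * get_user(), returning 0 on success and -EFAULT on a bad address.
 */
static inline int example_write_u32(u32 __user *uptr, u32 val)
{
	return put_user(val, uptr);
}
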
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __pkn_err = 0;						\
									\
	__raw_put_mem("str", *((type *)(src)),				\
		      (__force type *)(dst), __pkn_err);		\
	if (unlikely(__pkn_err))					\
		goto err_label;						\
} while (0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_ttbr0_enable();						\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_ttbr0_disable();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_ttbr0_enable();						\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				      (from), (n));			\
	uaccess_ttbr0_disable();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_ttbr0_enable();						\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
				    __uaccess_mask_ptr(from), (n));	\
	uaccess_ttbr0_disable();					\
	__aciu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

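/*
 * For illustration only (hypothetical helper): the generic
 * copy_from_user() reaches raw_copy_from_user() after performing its
 * own access_ok() check, along these lines. The return value is the
 * number of bytes that could not be copied.
 */
static inline unsigned long example_copy_from(void *dst,
					      const void __user *src,
					      unsigned long n)
{
	if (!access_ok(src, n))
		return n;	/* nothing copied */
	return raw_copy_from_user(dst, src, n);
}
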
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_ttbr0_enable();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_ttbr0_disable();
	}
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */