// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>

#include <asm/alternative.h>
#include <asm/arch_timer.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. We rely on the CPU
 * hotplug functionality embodied in smp_shutdown_nonboot_cpus() to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	do_kernel_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

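/*
 * Decode PSTATE.BTYPE for print_pstate(): each entry mirrors the
 * PSR_BTYPE_* suffix it is indexed by, with '-' standing in for an
 * unset letter (e.g. "-c" for PSR_BTYPE_C).
 */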
#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n",
		       pstate,
		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
		       pstate & PSR_AA32_F_BIT ? 'F' : 'f',
		       pstate & PSR_AA32_DIT_BIT ? '+' : '-',
		       pstate & PSR_AA32_SSBS_BIT ? '+' : '-');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-',
		       pstate & PSR_TCO_BIT ? '+' : '-',
		       pstate & PSR_DIT_BIT ? '+' : '-',
		       pstate & PSR_SSBS_BIT ? '+' : '-',
		       btype_str);
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

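	/*
	 * Dump the GPRs three per row, counting down from top_reg to x0;
	 * the inner i-- % 3 test closes a row once the register just
	 * printed has an index that is a multiple of three.
	 */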
	while (i >= 0) {
		printk("x%-2d: %016llx", i, regs->regs[i]);

		while (i-- % 3)
			pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

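/*
 * Reset the TLS registers for a freshly exec'd task: TPIDR_EL0 (and
 * TPIDR2_EL0 where implemented), plus the shadow tp_value/TPIDRRO_EL0
 * pair maintained for compat tasks.
 */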
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(0, SYS_TPIDR2_EL0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

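/* Drop any tagged-address ABI opt-in when a task execs. */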
static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

static void flush_poe(void)
{
	if (!system_supports_poe())
		return;

	write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
	flush_poe();
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's copies
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF flags and buffers in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	/*
	 * In the unlikely event that we create a new thread with ZA
	 * enabled we should retain the ZA and ZT state so duplicate
	 * it here. This may be shortly freed if we exec() or if
	 * CLONE_SETTLS but it's simpler to do it here. To avoid
	 * confusing the rest of the code ensure that we have a
	 * sve_state allocated whenever sme_state is allocated.
	 */
	if (thread_za_enabled(&src->thread)) {
		dst->thread.sve_state = kzalloc(sve_state_size(src),
						GFP_KERNEL);
		if (!dst->thread.sve_state)
			return -ENOMEM;

		dst->thread.sme_state = kmemdup(src->thread.sme_state,
						sme_state_size(src),
						GFP_KERNEL);
		if (!dst->thread.sme_state) {
			kfree(dst->thread.sve_state);
			dst->thread.sve_state = NULL;
			return -ENOMEM;
		}
	} else {
		dst->thread.sme_state = NULL;
		clear_tsk_thread_flag(dst, TIF_SME);
	}

	dst->thread.fp_type = FP_STATE_FPSIMD;

	/* clear any pending asynchronous tag fault raised by the parent */
	clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

	return 0;
}

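/*
 * Defined in entry.S; a new task first runs from here. If x19 (set from
 * args->fn in copy_thread() below) is non-NULL the task is a kernel
 * thread and fn is called with x20 (args->fn_arg) as its argument;
 * otherwise the task heads out to userspace via ret_to_user.
 */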
asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long stack_start = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!args->fn)) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);
		if (system_supports_tpidr2())
			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

		if (system_supports_poe())
			p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread. We also reset TPIDR2 if it's in use.
		 */
		if (clone_flags & CLONE_SETTLS) {
			p->thread.uw.tp_value = tls;
			p->thread.tpidr2_el0 = 0;
		}
	} else {
		/*
		 * A kthread has no context to ERET to, so ensure any buggy
		 * ERET is treated as an illegal exception return.
		 *
		 * When a user task is created from a kthread, childregs will
		 * be initialized by start_thread() or start_compat_thread().
		 */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;

		p->thread.cpu_context.x19 = (unsigned long)args->fn;
		p->thread.cpu_context.x20 = (unsigned long)args->fn_arg;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;
	/*
	 * For the benefit of the unwinder, set up childregs->stackframe
	 * as the final frame for the new task.
	 */
	p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
	if (system_supports_tpidr2() && !is_compat_task())
		current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
	if (system_supports_tpidr2())
		write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (alternative_has_cap_unlikely(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of
 * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0}
 * accesses and prctl(PR_SET_TSC). Ensure access is disabled iff a workaround is
 * required or PR_TSC_SIGSEGV is set.
 */
static void update_cntkctl_el1(struct task_struct *next)
{
	struct thread_info *ti = task_thread_info(next);

	if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) ||
	    has_erratum_handler(read_cntvct_el0) ||
	    (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
	     this_cpu_has_cap(ARM64_WORKAROUND_1418040) &&
	     is_compat_thread(ti)))
		sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
	else
		sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
}

static void cntkctl_thread_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	if ((read_ti_thread_flags(task_thread_info(prev)) &
	     (_TIF_32BIT | _TIF_TSC_SIGSEGV)) !=
	    (read_ti_thread_flags(task_thread_info(next)) &
	     (_TIF_32BIT | _TIF_TSC_SIGSEGV)))
		update_cntkctl_el1(next);
}

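/*
 * Back end for the PR_SET_TSC prctl(): record the chosen mode in
 * TIF_TSC_SIGSEGV and refresh CNTKCTL_EL1 on this CPU, with preemption
 * disabled so the flag and the sysreg cannot go out of sync.
 */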
static int do_set_tsc_mode(unsigned int val)
{
	bool tsc_sigsegv;

	if (val == PR_TSC_SIGSEGV)
		tsc_sigsegv = true;
	else if (val == PR_TSC_ENABLE)
		tsc_sigsegv = false;
	else
		return -EINVAL;

	preempt_disable();
	update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv);
	update_cntkctl_el1(current);
	preempt_enable();

	return 0;
}

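/*
 * Context-switch POR_EL0 for tasks using the permission overlay
 * extension, skipping the sysreg write when the value is unchanged.
 */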
static void permission_overlay_switch(struct task_struct *next)
{
	if (!system_supports_poe())
		return;

	current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
	if (current->thread.por_el0 != next->thread.por_el0) {
		write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
	}
}

/*
 * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
 * this function must be called with preemption disabled and the update to
 * sctlr_user must be made in the same preemption disabled block so that
 * __switch_to() does not see the variable update before the SCTLR_EL1 one.
 */
void update_sctlr_el1(u64 sctlr)
{
	/*
	 * EnIA must not be cleared while in the kernel as this is necessary for
	 * in-kernel PAC. It will be cleared on kernel exit if needed.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

	/* ISB required for the kernel uaccess routines when setting TCF0. */
	isb();
}

/*
 * Thread switching.
 */
__notrace_funcgraph __sched
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	ssbs_thread_switch(next);
	cntkctl_thread_switch(prev, next);
	ptrauth_thread_switch_user(next);
	permission_overlay_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/*
	 * MTE thread switching must happen after the DSB above to ensure that
	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
	 * registers.
	 */
	mte_thread_switch(next);
	/* avoid expensive SCTLR_EL1 accesses if no change */
	if (prev->thread.sctlr_user != next->thread.sctlr_user)
		update_sctlr_el1(next->thread.sctlr_user);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

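/*
 * Stack-walk state and callback for __get_wchan() below: record the
 * first PC found outside the scheduler and stop, giving up after 16
 * frames.
 */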
struct wchan_info {
	unsigned long pc;
	int count;
};

static bool get_wchan_cb(void *arg, unsigned long pc)
{
	struct wchan_info *wchan_info = arg;

	if (!in_sched_functions(pc)) {
		wchan_info->pc = pc;
		return false;
	}
	return wchan_info->count++ < 16;
}

unsigned long __get_wchan(struct task_struct *p)
{
	struct wchan_info wchan_info = {
		.pc = 0,
		.count = 0,
	};

	if (!try_get_task_stack(p))
		return 0;

	arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL);

	put_task_stack(p);

	return wchan_info.pc;
}

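/*
 * Randomize the initial stack pointer within a page, then round down to
 * the 16-byte alignment required of SP by the AAPCS64.
 */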
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

#ifdef CONFIG_COMPAT
int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
	if (!system_supports_32bit_el0())
		return false;

	if ((hdr)->e_machine != EM_ARM)
		return false;

	if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
		return false;

	/*
	 * Prevent execve() of a 32-bit program from a deadline task
	 * if the restricted affinity mask would be inadmissible on an
	 * asymmetric system.
	 */
	return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
#endif

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	unsigned long mmflags = 0;

	if (is_compat_task()) {
		mmflags = MMCF_AARCH32;

		/*
		 * Restrict the CPU affinity mask for a 32-bit task so that
		 * it contains only 32-bit-capable CPUs.
		 *
		 * From the perspective of the task, this looks similar to
		 * what would happen if the 64-bit-only CPUs were hot-unplugged
		 * at the point of execve(), although we try a bit harder to
		 * honour the cpuset hierarchy.
		 */
		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
			force_compatible_cpus_allowed_ptr(current);
	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
		relax_compatible_cpus_allowed_ptr(current);
	}

	current->mm->context.flags = mmflags;
	ptrauth_thread_init_user();
	mte_thread_init_user();
	do_set_tsc_mode(PR_TSC_ENABLE);

	if (task_spec_ssb_noexec(current)) {
		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
					 PR_SPEC_ENABLE);
	}
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (system_supports_mte())
		valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \
			| PR_MTE_TAG_MASK;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	if (set_mte_ctrl(task, arg) != 0)
		return -EINVAL;

	update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(struct task_struct *task)
{
	long ret = 0;
	struct thread_info *ti = task_thread_info(task);

	if (is_compat_thread(ti))
		return -EINVAL;

	if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
		ret = PR_TAGGED_ADDR_ENABLE;

	ret |= get_mte_ctrl(task);

	return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */
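/* e.g. "sysctl abi.tagged_addr_disabled=1" blocks any further opt-ins. */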
static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif	/* CONFIG_ARM64_TAGGED_ADDR_ABI */

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif

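/*
 * PR_GET_TSC: report whether userspace counter-timer reads are allowed
 * (PR_TSC_ENABLE) or trap to SIGSEGV (PR_TSC_SIGSEGV) for this task.
 */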
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (is_compat_task())
		return -EINVAL;

	if (test_thread_flag(TIF_TSC_SIGSEGV))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (is_compat_task())
		return -EINVAL;

	return do_set_tsc_mode(val);
}