1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2014 ARM Limited
7 #include <linux/init.h>
8 #include <linux/list.h>
9 #include <linux/perf_event.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/sysctl.h>
13 #include <linux/uaccess.h>
15 #include <asm/cpufeature.h>
17 #include <asm/sysreg.h>
18 #include <asm/system_misc.h>
19 #include <asm/traps.h>
21 #define CREATE_TRACE_POINTS
22 #include "trace-events-emulation.h"
/*
 * The runtime support for deprecated instruction support can be in one of
 * following three states -
 *
 * 0 = undef
 * 1 = emulate (software emulation)
 * 2 = hw (supported in hardware)
 */
enum insn_emulation_mode {
	INSN_UNDEF,
	INSN_EMULATE,
	INSN_HW,
};
/* How far along the deprecation path a given instruction is. */
enum legacy_insn_status {
	INSN_DEPRECATED,
	INSN_OBSOLETE,
	INSN_UNAVAILABLE,
};
44 struct insn_emulation
{
46 enum legacy_insn_status status
;
47 bool (*try_emulate
)(struct pt_regs
*regs
,
49 int (*set_hw_mode
)(bool enable
);
55 /* sysctl for this emulation */
56 struct ctl_table sysctl
;
59 #define ARM_OPCODE_CONDTEST_FAIL 0
60 #define ARM_OPCODE_CONDTEST_PASS 1
61 #define ARM_OPCODE_CONDTEST_UNCOND 2
63 #define ARM_OPCODE_CONDITION_UNCOND 0xf
65 static unsigned int __maybe_unused
aarch32_check_condition(u32 opcode
, u32 psr
)
67 u32 cc_bits
= opcode
>> 28;
69 if (cc_bits
!= ARM_OPCODE_CONDITION_UNCOND
) {
70 if ((*aarch32_opcode_cond_checks
[cc_bits
])(psr
))
71 return ARM_OPCODE_CONDTEST_PASS
;
73 return ARM_OPCODE_CONDTEST_FAIL
;
75 return ARM_OPCODE_CONDTEST_UNCOND
;
78 #ifdef CONFIG_SWP_EMULATION
80 * Implement emulation of the SWP/SWPB instructions using load-exclusive and
83 * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
84 * Where: Rt = destination
90 * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
93 /* Arbitrary constant to ensure forward-progress of the LL/SC loop */
94 #define __SWP_LL_SC_LOOPS 4
/*
 * Emulate SWP/SWPB with an LL/SC sequence on a user address.  On success
 * 'data' holds the old memory value; 'res' is 0, -EAGAIN if the exclusive
 * loop failed to make progress within __SWP_LL_SC_LOOPS iterations, or
 * -EFAULT via the exception table on an access fault.
 */
#define __user_swpX_asm(data, addr, res, temp, temp2, B)	\
do {								\
	uaccess_enable_privileged();				\
	__asm__ __volatile__(					\
	"	mov		%w3, %w6\n"			\
	"0:	ldxr"B"		%w2, [%4]\n"			\
	"1:	stxr"B"		%w0, %w1, [%4]\n"		\
	"	cbz		%w0, 2f\n"			\
	"	sub		%w3, %w3, #1\n"			\
	"	cbnz		%w3, 0b\n"			\
	"	mov		%w0, %w5\n"			\
	"	b		3f\n"				\
	"2:\n"							\
	"	mov		%w1, %w2\n"			\
	"3:\n"							\
	_ASM_EXTABLE_UACCESS_ERR(0b, 3b, %w0)			\
	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0)			\
	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2)	\
	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
	  "i" (__SWP_LL_SC_LOOPS)				\
	: "memory");						\
	uaccess_disable_privileged();				\
} while (0)

#define __user_swp_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "")
#define __user_swpb_asm(data, addr, res, temp, temp2) \
	__user_swpX_asm(data, addr, res, temp, temp2, "b")
126 * Bit 22 of the instruction encoding distinguishes between
127 * the SWP and SWPB variants (bit set means SWPB).
129 #define TYPE_SWPB (1 << 22)
131 static int emulate_swpX(unsigned int address
, unsigned int *data
,
134 unsigned int res
= 0;
136 if ((type
!= TYPE_SWPB
) && (address
& 0x3)) {
137 /* SWP to unaligned address not permitted */
138 pr_debug("SWP instruction on unaligned pointer!\n");
143 unsigned long temp
, temp2
;
145 if (type
== TYPE_SWPB
)
146 __user_swpb_asm(*data
, address
, res
, temp
, temp2
);
148 __user_swp_asm(*data
, address
, res
, temp
, temp2
);
150 if (likely(res
!= -EAGAIN
) || signal_pending(current
))
160 * swp_handler logs the id of calling process, dissects the instruction, sanity
161 * checks the memory location, calls emulate_swpX for the actual operation and
162 * deals with fixup/error handling before returning
164 static int swp_handler(struct pt_regs
*regs
, u32 instr
)
166 u32 destreg
, data
, type
, address
= 0;
167 const void __user
*user_ptr
;
168 int rn
, rt2
, res
= 0;
170 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS
, 1, regs
, regs
->pc
);
172 type
= instr
& TYPE_SWPB
;
174 switch (aarch32_check_condition(instr
, regs
->pstate
)) {
175 case ARM_OPCODE_CONDTEST_PASS
:
177 case ARM_OPCODE_CONDTEST_FAIL
:
178 /* Condition failed - return to next instruction */
180 case ARM_OPCODE_CONDTEST_UNCOND
:
181 /* If unconditional encoding - not a SWP, undef */
187 rn
= aarch32_insn_extract_reg_num(instr
, A32_RN_OFFSET
);
188 rt2
= aarch32_insn_extract_reg_num(instr
, A32_RT2_OFFSET
);
190 address
= (u32
)regs
->user_regs
.regs
[rn
];
191 data
= (u32
)regs
->user_regs
.regs
[rt2
];
192 destreg
= aarch32_insn_extract_reg_num(instr
, A32_RT_OFFSET
);
194 pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
195 rn
, address
, destreg
,
196 aarch32_insn_extract_reg_num(instr
, A32_RT2_OFFSET
), data
);
198 /* Check access in reasonable access range for both SWP and SWPB */
199 user_ptr
= (const void __user
*)(unsigned long)(address
& ~3);
200 if (!access_ok(user_ptr
, 4)) {
201 pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
206 res
= emulate_swpX(address
, &data
, type
);
210 regs
->user_regs
.regs
[destreg
] = data
;
213 if (type
== TYPE_SWPB
)
214 trace_instruction_emulation("swpb", regs
->pc
);
216 trace_instruction_emulation("swp", regs
->pc
);
218 pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
219 current
->comm
, (unsigned long)current
->pid
, regs
->pc
);
221 arm64_skip_faulting_instruction(regs
, 4);
225 pr_debug("SWP{B} emulation: access caused memory abort!\n");
226 arm64_notify_segfault(address
);
231 static bool try_emulate_swp(struct pt_regs
*regs
, u32 insn
)
233 /* SWP{B} only exists in ARM state and does not exist in Thumb */
234 if (!compat_user_mode(regs
) || compat_thumb_mode(regs
))
237 if ((insn
& 0x0fb00ff0) != 0x01000090)
240 return swp_handler(regs
, insn
) == 0;
243 static struct insn_emulation insn_swp
= {
245 .status
= INSN_OBSOLETE
,
246 .try_emulate
= try_emulate_swp
,
249 #endif /* CONFIG_SWP_EMULATION */
251 #ifdef CONFIG_CP15_BARRIER_EMULATION
252 static int cp15barrier_handler(struct pt_regs
*regs
, u32 instr
)
254 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS
, 1, regs
, regs
->pc
);
256 switch (aarch32_check_condition(instr
, regs
->pstate
)) {
257 case ARM_OPCODE_CONDTEST_PASS
:
259 case ARM_OPCODE_CONDTEST_FAIL
:
260 /* Condition failed - return to next instruction */
262 case ARM_OPCODE_CONDTEST_UNCOND
:
263 /* If unconditional encoding - not a barrier instruction */
269 switch (aarch32_insn_mcr_extract_crm(instr
)) {
272 * dmb - mcr p15, 0, Rt, c7, c10, 5
273 * dsb - mcr p15, 0, Rt, c7, c10, 4
275 if (aarch32_insn_mcr_extract_opc2(instr
) == 5) {
277 trace_instruction_emulation(
278 "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs
->pc
);
281 trace_instruction_emulation(
282 "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs
->pc
);
287 * isb - mcr p15, 0, Rt, c7, c5, 4
289 * Taking an exception or returning from one acts as an
290 * instruction barrier. So no explicit barrier needed here.
292 trace_instruction_emulation(
293 "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs
->pc
);
298 pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
299 current
->comm
, (unsigned long)current
->pid
, regs
->pc
);
301 arm64_skip_faulting_instruction(regs
, 4);
305 static int cp15_barrier_set_hw_mode(bool enable
)
308 sysreg_clear_set(sctlr_el1
, 0, SCTLR_EL1_CP15BEN
);
310 sysreg_clear_set(sctlr_el1
, SCTLR_EL1_CP15BEN
, 0);
314 static bool try_emulate_cp15_barrier(struct pt_regs
*regs
, u32 insn
)
316 if (!compat_user_mode(regs
) || compat_thumb_mode(regs
))
319 if ((insn
& 0x0fff0fdf) == 0x0e070f9a)
320 return cp15barrier_handler(regs
, insn
) == 0;
322 if ((insn
& 0x0fff0fff) == 0x0e070f95)
323 return cp15barrier_handler(regs
, insn
) == 0;
328 static struct insn_emulation insn_cp15_barrier
= {
329 .name
= "cp15_barrier",
330 .status
= INSN_DEPRECATED
,
331 .try_emulate
= try_emulate_cp15_barrier
,
332 .set_hw_mode
= cp15_barrier_set_hw_mode
,
334 #endif /* CONFIG_CP15_BARRIER_EMULATION */
336 #ifdef CONFIG_SETEND_EMULATION
337 static int setend_set_hw_mode(bool enable
)
339 if (!cpu_supports_mixed_endian_el0())
343 sysreg_clear_set(sctlr_el1
, SCTLR_EL1_SED
, 0);
345 sysreg_clear_set(sctlr_el1
, 0, SCTLR_EL1_SED
);
349 static int compat_setend_handler(struct pt_regs
*regs
, u32 big_endian
)
353 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS
, 1, regs
, regs
->pc
);
357 regs
->pstate
|= PSR_AA32_E_BIT
;
360 regs
->pstate
&= ~PSR_AA32_E_BIT
;
363 trace_instruction_emulation(insn
, regs
->pc
);
364 pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
365 current
->comm
, (unsigned long)current
->pid
, regs
->pc
);
370 static int a32_setend_handler(struct pt_regs
*regs
, u32 instr
)
372 int rc
= compat_setend_handler(regs
, (instr
>> 9) & 1);
373 arm64_skip_faulting_instruction(regs
, 4);
377 static int t16_setend_handler(struct pt_regs
*regs
, u32 instr
)
379 int rc
= compat_setend_handler(regs
, (instr
>> 3) & 1);
380 arm64_skip_faulting_instruction(regs
, 2);
384 static bool try_emulate_setend(struct pt_regs
*regs
, u32 insn
)
386 if (compat_thumb_mode(regs
) &&
387 (insn
& 0xfffffff7) == 0x0000b650)
388 return t16_setend_handler(regs
, insn
) == 0;
390 if (compat_user_mode(regs
) &&
391 (insn
& 0xfffffdff) == 0xf1010000)
392 return a32_setend_handler(regs
, insn
) == 0;
397 static struct insn_emulation insn_setend
= {
399 .status
= INSN_DEPRECATED
,
400 .try_emulate
= try_emulate_setend
,
401 .set_hw_mode
= setend_set_hw_mode
,
403 #endif /* CONFIG_SETEND_EMULATION */
/* All emulations compiled into this kernel, in probe order. */
static struct insn_emulation *insn_emulations[] = {
#ifdef CONFIG_SWP_EMULATION
	&insn_swp,
#endif
#ifdef CONFIG_CP15_BARRIER_EMULATION
	&insn_cp15_barrier,
#endif
#ifdef CONFIG_SETEND_EMULATION
	&insn_setend,
#endif
};
417 static DEFINE_MUTEX(insn_emulation_mutex
);
419 static void enable_insn_hw_mode(void *data
)
421 struct insn_emulation
*insn
= data
;
422 if (insn
->set_hw_mode
)
423 insn
->set_hw_mode(true);
426 static void disable_insn_hw_mode(void *data
)
428 struct insn_emulation
*insn
= data
;
429 if (insn
->set_hw_mode
)
430 insn
->set_hw_mode(false);
433 /* Run set_hw_mode(mode) on all active CPUs */
434 static int run_all_cpu_set_hw_mode(struct insn_emulation
*insn
, bool enable
)
436 if (!insn
->set_hw_mode
)
439 on_each_cpu(enable_insn_hw_mode
, (void *)insn
, true);
441 on_each_cpu(disable_insn_hw_mode
, (void *)insn
, true);
446 * Run set_hw_mode for all insns on a starting CPU.
448 * 0 - If all the hooks ran successfully.
449 * -EINVAL - At least one hook is not supported by the CPU.
451 static int run_all_insn_set_hw_mode(unsigned int cpu
)
457 * Disable IRQs to serialize against an IPI from
458 * run_all_cpu_set_hw_mode(), ensuring the HW is programmed to the most
459 * recent enablement state if the two race with one another.
461 local_irq_save(flags
);
462 for (int i
= 0; i
< ARRAY_SIZE(insn_emulations
); i
++) {
463 struct insn_emulation
*insn
= insn_emulations
[i
];
464 bool enable
= READ_ONCE(insn
->current_mode
) == INSN_HW
;
465 if (insn
->status
== INSN_UNAVAILABLE
)
468 if (insn
->set_hw_mode
&& insn
->set_hw_mode(enable
)) {
469 pr_warn("CPU[%u] cannot support the emulation of %s",
474 local_irq_restore(flags
);
479 static int update_insn_emulation_mode(struct insn_emulation
*insn
,
480 enum insn_emulation_mode prev
)
485 case INSN_UNDEF
: /* Nothing to be done */
490 if (!run_all_cpu_set_hw_mode(insn
, false))
491 pr_notice("Disabled %s support\n", insn
->name
);
495 switch (insn
->current_mode
) {
501 ret
= run_all_cpu_set_hw_mode(insn
, true);
503 pr_notice("Enabled %s support\n", insn
->name
);
510 static int emulation_proc_handler(const struct ctl_table
*table
, int write
,
511 void *buffer
, size_t *lenp
,
515 struct insn_emulation
*insn
= container_of(table
->data
, struct insn_emulation
, current_mode
);
516 enum insn_emulation_mode prev_mode
= insn
->current_mode
;
518 mutex_lock(&insn_emulation_mutex
);
519 ret
= proc_dointvec_minmax(table
, write
, buffer
, lenp
, ppos
);
521 if (ret
|| !write
|| prev_mode
== insn
->current_mode
)
524 ret
= update_insn_emulation_mode(insn
, prev_mode
);
526 /* Mode change failed, revert to previous mode. */
527 WRITE_ONCE(insn
->current_mode
, prev_mode
);
528 update_insn_emulation_mode(insn
, INSN_UNDEF
);
531 mutex_unlock(&insn_emulation_mutex
);
535 static void __init
register_insn_emulation(struct insn_emulation
*insn
)
537 struct ctl_table
*sysctl
;
539 insn
->min
= INSN_UNDEF
;
541 switch (insn
->status
) {
542 case INSN_DEPRECATED
:
543 insn
->current_mode
= INSN_EMULATE
;
544 /* Disable the HW mode if it was turned on at early boot time */
545 run_all_cpu_set_hw_mode(insn
, false);
549 insn
->current_mode
= INSN_UNDEF
;
550 insn
->max
= INSN_EMULATE
;
552 case INSN_UNAVAILABLE
:
553 insn
->current_mode
= INSN_UNDEF
;
554 insn
->max
= INSN_UNDEF
;
558 /* Program the HW if required */
559 update_insn_emulation_mode(insn
, INSN_UNDEF
);
561 if (insn
->status
!= INSN_UNAVAILABLE
) {
562 sysctl
= &insn
->sysctl
;
565 sysctl
->maxlen
= sizeof(int);
567 sysctl
->procname
= insn
->name
;
568 sysctl
->data
= &insn
->current_mode
;
569 sysctl
->extra1
= &insn
->min
;
570 sysctl
->extra2
= &insn
->max
;
571 sysctl
->proc_handler
= emulation_proc_handler
;
573 register_sysctl_sz("abi", sysctl
, 1);
577 bool try_emulate_armv8_deprecated(struct pt_regs
*regs
, u32 insn
)
579 for (int i
= 0; i
< ARRAY_SIZE(insn_emulations
); i
++) {
580 struct insn_emulation
*ie
= insn_emulations
[i
];
582 if (ie
->status
== INSN_UNAVAILABLE
)
586 * A trap may race with the mode being changed
587 * INSN_EMULATE<->INSN_HW. Try to emulate the instruction to
588 * avoid a spurious UNDEF.
590 if (READ_ONCE(ie
->current_mode
) == INSN_UNDEF
)
593 if (ie
->try_emulate(regs
, insn
))
601 * Invoked as core_initcall, which guarantees that the instruction
602 * emulation is ready for userspace.
604 static int __init
armv8_deprecated_init(void)
606 #ifdef CONFIG_SETEND_EMULATION
607 if (!system_supports_mixed_endian_el0()) {
608 insn_setend
.status
= INSN_UNAVAILABLE
;
609 pr_info("setend instruction emulation is not supported on this system\n");
613 for (int i
= 0; i
< ARRAY_SIZE(insn_emulations
); i
++) {
614 struct insn_emulation
*ie
= insn_emulations
[i
];
616 if (ie
->status
== INSN_UNAVAILABLE
)
619 register_insn_emulation(ie
);
622 cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING
,
623 "arm64/isndep:starting",
624 run_all_insn_set_hw_mode
, NULL
);
628 core_initcall(armv8_deprecated_init
);