/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/opcodes.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};
enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
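/*
 * Illustrative example (not part of the original file): bits [28:25] of an
 * A64 instruction select its encoding class. For a B instruction such as
 * 0x14000000, (0x14000000 >> 25) & 0xf == 0xa, which indexes
 * AARCH64_INSN_CLS_BR_SYS in the table above.
 */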
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = pfn_to_page(PHYS_PFN(__pa(addr)));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
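/*
 * Sketch of the mapping flow above (explanatory note): kernel/module text
 * is normally mapped read-only, so __aarch64_insn_write() cannot store to
 * 'addr' directly. patch_map() builds a temporary writable alias of the
 * page containing 'addr' at the FIX_TEXT_POKE0 fixmap slot, the write goes
 * through that alias, and patch_unmap() tears the alias down again.
 */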
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}
bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
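/*
 * Illustrative usage (assumed caller, not from this file): replacing one
 * instruction with a NOP might look like
 *
 *	u32 nop = aarch64_insn_gen_nop();
 *	int err = aarch64_insn_patch_text_nosync(addr, nop);
 *
 * where 'addr' is assumed to be a word-aligned kernel text address and the
 * old/new instruction pair is hotpatch-safe (see
 * aarch64_insn_hotpatch_safe()); otherwise the stop_machine()-based
 * aarch64_insn_patch_text() path below must be used instead.
 */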
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
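/*
 * Worked example (illustrative): for ADR/ADRP the 21-bit immediate is
 * split in the instruction word as immlo = insn[30:29] and immhi =
 * insn[23:5]. The ADR_IMM_* constants above reassemble it as
 *
 *	imm = (((insn >> 5) & 0x7ffff) << 2) | ((insn >> 29) & 0x3)
 *
 * which is exactly what the AARCH64_INSN_IMM_ADR case computes.
 */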
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
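/*
 * Field layout note: all A64 general-purpose register fields are 5 bits
 * wide; the switch above only selects the field position (Rd/Rt at bit 0,
 * Rn at bit 5, Rt2/Ra at bit 10, Rm at bit 16), matching the
 * GENMASK(4, 0) << shift clear-and-set sequence.
 */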
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
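/*
 * Design note: branch_imm_common() signals failure by returning 'range'
 * itself, which can never be a valid offset since valid offsets lie in
 * [-range, range). This is why callers below treat "offset >= SZ_128M"
 * (or SZ_1M) as the error case before encoding the immediate.
 */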
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
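/*
 * Illustrative example (values assumed): with pc = 0xffff000000081000 and
 * addr = 0xffff000000081800, offset is +0x800 bytes; offset >> 2 == 0x200
 * is stored in the 26-bit imm field, and the CPU scales it back by 4 when
 * the branch executes.
 */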
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
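/*
 * Example (illustrative): aarch64_insn_gen_nop() yields the canonical A64
 * NOP encoding 0xd503201f, i.e. the HINT instruction with CRm:op2 = 0.
 */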
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
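/*
 * Usage sketch (assumed caller, e.g. a JIT materializing a 64-bit
 * constant 'val' in register 'reg'):
 *
 *	insn0 = aarch64_insn_gen_movewide(reg, val & 0xffff, 0,
 *					  AARCH64_INSN_VARIANT_64BIT,
 *					  AARCH64_INSN_MOVEWIDE_ZERO);
 *	insn1 = aarch64_insn_gen_movewide(reg, (val >> 16) & 0xffff, 16,
 *					  AARCH64_INSN_VARIANT_64BIT,
 *					  AARCH64_INSN_MOVEWIDE_KEEP);
 *
 * i.e. MOVZ sets the low 16 bits and MOVK fills each higher 16-bit chunk.
 */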
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
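/*
 * Note on the shift arithmetic above ('imm' is s32): "(imm << 6) >> 4"
 * first places the 26-bit field at bit 31 so the arithmetic right shift
 * sign-extends it, and the net left shift of 2 restores the x4 scaling of
 * the encoded word offset. The IMM_19 and IMM_14 cases do the same for
 * their respective field widths.
 */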
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}
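/*
 * Example (illustrative): ADRP computes PC-relative addresses at 4KB page
 * granularity, hence the "<< 12" / ">> 12" above; a byte offset of 0x3000
 * is stored as an immediate of 3 pages.
 */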
/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}
#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
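/*
 * Indexing sketch (illustrative, caller assumed): an emulated AArch32
 * instruction's condition field selects the checker, e.g.
 *
 *	if (aarch32_opcode_cond_checks[(insn >> 28) & 0xf](regs->pstate))
 *		... condition passed, simulate the instruction ...
 */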