/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
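/*
 * Worked example (added for illustration; not part of the original file):
 * 0x14000001 encodes "b" to the next instruction. Bits [28:25] are 0b1010,
 * i.e. index 10 in the table above, so aarch64_get_insn_class(0x14000001)
 * returns AARCH64_INSN_CLS_BR_SYS.
 */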
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a,
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
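/*
 * Illustrative note (added; not in the original file): rewriting a B
 * instruction into a NOP, or vice versa, satisfies
 * aarch64_insn_hotpatch_safe() because both the old and the new
 * instruction are in the architecture's safe list above, so other CPUs
 * may execute through the word while it is being rewritten.
 */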
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
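/*
 * Minimal usage sketch (added for illustration; not part of the original
 * file): overwrite a single, word-aligned kernel instruction with a NOP.
 * Since both the old and new instruction are hotpatch-safe, this takes
 * the nosync + IPI fast path above rather than stop_machine().
 */
static int __maybe_unused example_patch_to_nop(void *addr)
{
	u32 nop = aarch64_insn_gen_nop();

	return aarch64_insn_patch_text(&addr, &nop, 1);
}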
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
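/*
 * Worked example (added for illustration; not in the original file):
 * round-trip a 19-bit branch immediate. A CBZ four instructions (16
 * bytes) ahead stores 16 >> 2 == 4 in its imm19 field.
 */
static bool __maybe_unused example_imm19_roundtrip(void)
{
	u32 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19,
						 aarch64_insn_get_cbz_value(),
						 16 >> 2);

	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn) == 4;
}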
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
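/*
 * Usage sketch (added for illustration; not in the original file): emit
 * "b target" encoded for placement at 'pc'. Both arguments are
 * hypothetical word-aligned text addresses within +/-128M of each other.
 */
static u32 __maybe_unused example_gen_b(unsigned long pc, unsigned long target)
{
	return aarch64_insn_gen_branch_imm(pc, target,
					   AARCH64_INSN_BRANCH_NOLINK);
}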
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
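/*
 * Usage sketch (added for illustration; not in the original file): the
 * classic frame-record push "stp x29, x30, [sp, #-16]!", as a JIT such
 * as the arm64 eBPF compiler emits in a prologue.
 */
static u32 __maybe_unused example_gen_push_frame(void)
{
	return aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
						AARCH64_INSN_REG_LR,
						AARCH64_INSN_REG_SP, -16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
}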
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_4K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
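/*
 * Usage sketch (added for illustration; not in the original file):
 * "add sp, sp, #16", popping a 16-byte stack frame.
 */
static u32 __maybe_unused example_gen_sp_add(void)
{
	return aarch64_insn_gen_add_sub_imm(AARCH64_INSN_REG_SP,
					    AARCH64_INSN_REG_SP, 16,
					    AARCH64_INSN_VARIANT_64BIT,
					    AARCH64_INSN_ADSB_ADD);
}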
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
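/*
 * Usage sketch (added for illustration; not in the original file):
 * materialise the 64-bit constant 0x1234000000005678 in x0 with a
 * MOVZ/MOVK pair, 16 bits per instruction.
 */
static void __maybe_unused example_gen_mov_imm64(u32 insns[2])
{
	insns[0] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x5678, 0,
					     AARCH64_INSN_VARIANT_64BIT,
					     AARCH64_INSN_MOVEWIDE_ZERO);
	insns[1] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x1234, 48,
					     AARCH64_INSN_VARIANT_64BIT,
					     AARCH64_INSN_MOVEWIDE_KEEP);
}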
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
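/*
 * Usage sketch (added for illustration; not in the original file): shift
 * the target of an existing immediate branch by 'delta' bytes, e.g. when
 * relocating code. The decode/encode pair keeps every other field intact.
 */
static u32 __maybe_unused example_retarget_branch(u32 insn, s32 delta)
{
	return aarch64_set_branch_offset(insn,
					 aarch64_get_branch_offset(insn) + delta);
}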
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
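/*
 * Usage sketch (added for illustration; not in the original file): decide
 * whether a conditional AArch32 instruction would execute under a given
 * PSTATE, as an emulation or single-step handler might. 'cond' is the
 * 4-bit condition field taken from the instruction.
 */
static bool __maybe_unused example_cond_passes(u32 cond, unsigned long pstate)
{
	return aarch32_opcode_cond_checks[cond & 0xf](pstate);
}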