/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};
enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
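/*
 * Worked example (added for clarity): an unconditional branch such as
 * "b ." (0x14000000) has op0 bits[28:25] = 0xa, so
 * aarch64_get_insn_class(0x14000000) returns AARCH64_INSN_CLS_BR_SYS.
 */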
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = virt_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
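/*
 * Illustrative example (not part of the original file): replacing a NOP
 * with an unconditional branch is hotpatch-safe, because both the old
 * and the new instruction are in the B/BL/NOP/BKPT/SVC/HVC/SMC set above:
 *
 *	safe = aarch64_insn_hotpatch_safe(aarch64_insn_gen_nop(),
 *			aarch64_insn_gen_branch_imm(pc, addr,
 *					AARCH64_INSN_BRANCH_NOLINK));
 */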
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
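/*
 * Illustrative usage (not part of the original file): when no other CPU
 * can be executing the target instruction concurrently, a single
 * word-aligned instruction can be replaced directly:
 *
 *	ret = aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop());
 */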
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
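/*
 * Illustrative usage (not part of the original file): patching more than
 * one instruction at a time always takes the stop_machine() path above:
 *
 *	void *addrs[] = { addr0, addr1 };
 *	u32 insns[] = { aarch64_insn_gen_nop(), aarch64_insn_gen_nop() };
 *
 *	ret = aarch64_insn_patch_text(addrs, insns, 2);
 */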
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
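/*
 * Note (added for clarity): ADR/ADRP scatter their 21-bit immediate
 * across the instruction word. The low ADR_IMM_HILOSPLIT (2) bits live
 * in immlo at bits [30:29] and the remaining 19 bits in immhi at bits
 * [23:5]; the two helpers below reassemble and split the immediate
 * around that layout.
 */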
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
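/*
 * Worked example (added for clarity): for "b . + 8" (0x14000002),
 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, 0x14000002)
 * returns 2, i.e. the branch offset in words rather than bytes.
 */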
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
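/*
 * Worked example (added for clarity): a branch from pc to pc + 8 has a
 * word offset of 2, so
 * aarch64_insn_gen_branch_imm(pc, pc + 8, AARCH64_INSN_BRANCH_NOLINK)
 * should produce 0x14000002, i.e. "b . + 8".
 */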
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
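/*
 * Note (added for clarity): since NOP is HINT #0, this should evaluate
 * to the canonical A64 NOP encoding 0xd503201f.
 */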
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
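/*
 * Worked example (added for clarity): the hw field at bits [22:21]
 * holds shift / 16, so
 * aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 1, 0,
 *			     AARCH64_INSN_VARIANT_32BIT,
 *			     AARCH64_INSN_MOVEWIDE_ZERO)
 * should produce 0x52800020, i.e. "movz w0, #1".
 */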
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
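/*
 * Note (added for clarity): each shift pair above sign-extends the
 * decoded field and converts words to bytes in one go. For the 26-bit
 * field, "<< 6" moves the field's sign bit to bit 31 and the arithmetic
 * ">> 4" shifts back while leaving a net multiply by 4; e.g. an all-ones
 * imm of 0x3ffffff decodes to a byte offset of -4.
 */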
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}
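/*
 * Note (added for clarity): in the T32 instruction set, a first
 * halfword of 0xe800 or above marks a 32-bit ("wide") encoding;
 * anything below it is a 16-bit instruction.
 */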
/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}
#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}
#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}