// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/sections.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)

static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

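/*
 * Example (illustrative, not used by the kernel itself): the NOP encoding
 * 0xd503201f has op0 == 0b1010 in bits [28:25], so
 * aarch64_get_insn_class(0xd503201f) returns AARCH64_INSN_CLS_BR_SYS,
 * the branch/exception/system class that HINT instructions live in.
 */
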
bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_ret_auth(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_br_auth(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_blr_auth(insn) ||
		aarch64_insn_is_bcond(insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		__flush_icache_range((uintptr_t)tp,
				     (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

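/*
 * Usage sketch (hypothetical): overwriting one instruction with a NOP from a
 * context where no other CPU can be executing the patched code, e.g. early
 * boot. Icache maintenance is done by aarch64_insn_patch_text_nosync():
 *
 *	int patch_in_nop(void *addr)
 *	{
 *		return aarch64_insn_patch_text_nosync(addr,
 *						      aarch64_insn_gen_nop());
 *	}
 */
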
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}

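/*
 * Usage sketch (hypothetical): patching live text while other CPUs may be
 * running it. aarch64_insn_patch_text() serialises everything under
 * stop_machine(), so a single call suffices:
 *
 *	void *addrs[] = { addr };
 *	u32 insns[] = { aarch64_insn_gen_nop() };
 *	int ret = aarch64_insn_patch_text(addrs, insns, 1);
 */
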
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

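/*
 * Example (illustrative): a round trip through the two helpers. For a B/BL
 * instruction the 26-bit immediate sits at bits [25:0], so with imm 0x12345:
 *
 *	u32 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26,
 *						 aarch64_insn_get_b_value(),
 *						 0x12345);
 *	u64 imm  = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
 *	// imm == 0x12345
 */
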
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}

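/*
 * Usage sketch (hypothetical): generating a BL from a call site to a target
 * function, as an ftrace-style caller might, then patching it in:
 *
 *	u32 bl = aarch64_insn_gen_branch_imm(callsite, (unsigned long)target,
 *					     AARCH64_INSN_BRANCH_LINK);
 *	if (bl != AARCH64_BREAK_FAULT)
 *		aarch64_insn_patch_text_nosync((void *)callsite, bl);
 */
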
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

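/*
 * Example (illustrative): aarch64_insn_gen_nop() is just the HINT base value
 * ORed with the NOP op, which should yield the canonical 0xd503201f NOP
 * encoding. Other hints are generated the same way, e.g.:
 *
 *	u32 bti_c = aarch64_insn_gen_hint(AARCH64_INSN_HINT_BTIC);
 */
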
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

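/*
 * Usage sketch (hypothetical): building the frame-record push
 * "stp x29, x30, [sp, #-16]!" that function prologues use:
 *
 *	u32 stp = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 *						   AARCH64_INSN_REG_LR,
 *						   AARCH64_INSN_REG_SP,
 *						   -16,
 *						   AARCH64_INSN_VARIANT_64BIT,
 *						   AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 */
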
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	u32 insn = aarch64_insn_get_ldadd_value();

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}

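/*
 * Usage sketch (hypothetical): an eBPF-JIT-style atomic add,
 * "stadd x1, [x2]", adding the value in x1 to the memory at [x2]:
 *
 *	u32 stadd = aarch64_insn_gen_stadd(AARCH64_INSN_REG_2,	// address
 *					   AARCH64_INSN_REG_1,	// value
 *					   AARCH64_INSN_SIZE_64);
 */
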
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}

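/*
 * Example (illustrative): an immediate with only the top 12 bits set is
 * encoded with the LSL #12 form, so both of these succeed:
 *
 *	aarch64_insn_gen_add_sub_imm(..., 0xfff, ...)	  // add #0xfff
 *	aarch64_insn_gen_add_sub_imm(..., 0xfff000, ...)  // add #0xfff, lsl #12
 *
 * while 0xfff001 straddles both halves and returns AARCH64_BREAK_FAULT.
 */
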
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

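/*
 * Usage sketch (hypothetical): synthesising a 64-bit constant in x0 the way
 * a JIT might, one 16-bit chunk at a time:
 *
 *	u32 movz = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					     val & 0xffff, 0,
 *					     AARCH64_INSN_VARIANT_64BIT,
 *					     AARCH64_INSN_MOVEWIDE_ZERO);
 *	u32 movk = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
 *					     (val >> 16) & 0xffff, 16,
 *					     AARCH64_INSN_VARIANT_64BIT,
 *					     AARCH64_INSN_MOVEWIDE_KEEP);
 *	// ...and likewise for shifts 32 and 48.
 */
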
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}

u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

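/*
 * Usage sketch (hypothetical): retargeting an existing branch at 'pc' to
 * 'new_target' without knowing which branch flavour it is:
 *
 *	u32 insn;
 *
 *	aarch64_insn_read((void *)pc, &insn);
 *	insn = aarch64_set_branch_offset(insn, new_target - pc);
 *	aarch64_insn_patch_text_nosync((void *)pc, insn);
 */
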
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

)
1535 /* Doesn't handle full ones or full zeroes */
1536 u64 sval
= val
>> __ffs64(val
);
1538 /* One of Sean Eron Anderson's bithack tricks */
1539 return ((sval
+ 1) & (sval
)) == 0;
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size.
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

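/*
 * Example (illustrative): 0xff is a valid "bitmask immediate" (a contiguous
 * run of eight ones), so this should yield "orr w0, w1, #0xff" with
 * immr == 0 and imms == 0b000111:
 *
 *	u32 orr = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
 *						     AARCH64_INSN_VARIANT_32BIT,
 *						     AARCH64_INSN_REG_1,
 *						     AARCH64_INSN_REG_0,
 *						     0xff);
 */
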
u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}