/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 0 0 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 0 1 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 1 0 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 1 1 */
	AARCH64_INSN_CLS_LDST,		/* 0 1 0 0 */
	AARCH64_INSN_CLS_DP_REG,	/* 0 1 0 1 */
	AARCH64_INSN_CLS_LDST,		/* 0 1 1 0 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0 1 1 1 */
	AARCH64_INSN_CLS_DP_IMM,	/* 1 0 0 0 */
	AARCH64_INSN_CLS_DP_IMM,	/* 1 0 0 1 */
	AARCH64_INSN_CLS_BR_SYS,	/* 1 0 1 0 */
	AARCH64_INSN_CLS_BR_SYS,	/* 1 0 1 1 */
	AARCH64_INSN_CLS_LDST,		/* 1 1 0 0 */
	AARCH64_INSN_CLS_DP_REG,	/* 1 1 0 1 */
	AARCH64_INSN_CLS_LDST,		/* 1 1 1 0 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 1 1 1 1 */
};
enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
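/*
 * For illustration (not part of the original source): bits [28:25] of an
 * A64 instruction select its encoding class. For example, ADD X0, X0, #1
 * encodes as 0x91000400, and (0x91000400 >> 25) & 0xf == 0x8, which indexes
 * AARCH64_INSN_CLS_DP_IMM in the table above ("data processing, immediate").
 */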
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
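/*
 * Illustrative note: with strict RWX protections enabled, kernel and module
 * text is mapped read-only, so it cannot be written through its normal
 * mapping. patch_map() creates a temporary writable alias of the page
 * containing `addr` via the FIX_TEXT_POKE0 fixmap slot; the writer below
 * pokes through that alias and then tears it down with patch_unmap().
 */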
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
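/*
 * Usage sketch (illustrative only): to replace the instruction at `addr`
 * with a NOP:
 *
 *	aarch64_insn_write(addr, aarch64_insn_gen_nop());
 *
 * Callers remain responsible for I-cache maintenance; see
 * aarch64_insn_patch_text_nosync() below, which pairs the write with
 * flush_icache_range().
 */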
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */
	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}
bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, b.cond, br, blr */
	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_bcond(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
		__aarch64_insn_hotpatch_safe(new_insn);
}
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
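/*
 * Illustrative note: with N online CPUs, each CPU increments cpu_count once
 * on entry (N total) and the master adds one more after patching. The
 * secondaries therefore spin while cpu_count <= N and are released only by
 * the master's final increment, which makes the count N + 1.
 */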
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
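/*
 * For illustration: AARCH64_INSN_IMM_26 describes the 26-bit immediate of
 * B/BL at bits [25:0], while AARCH64_INSN_IMM_19 describes the 19-bit
 * immediate of CBZ/CBNZ/B.cond at bits [23:5]; the encode/decode helpers
 * below use these (mask, shift) pairs to isolate the field.
 */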
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
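/*
 * Illustrative note: ADR/ADRP split their 21-bit immediate (hence
 * ADR_IMM_SIZE == SZ_2M == 2^21) into immlo = imm[1:0] at bits [30:29] and
 * immhi = imm[20:2] at bits [23:5]. The helpers below fold the two fields
 * back together when decoding and tear them apart when encoding.
 */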
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
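/*
 * For illustration: CBZ X0, #+8 encodes as 0xb4000040, so
 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, 0xb4000040) yields
 * (0xb4000040 >> 5) & (BIT(19) - 1) == 2, the branch offset in words.
 */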
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
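/*
 * For illustration: a forward branch of 8 bytes yields imm26 = 8 >> 2 = 2,
 * so aarch64_insn_gen_branch_imm(pc, pc + 8, AARCH64_INSN_BRANCH_NOLINK)
 * produces 0x14000002, i.e. "B #8".
 */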
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
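/* Illustrative: aarch64_insn_gen_nop() returns 0xd503201f, the A64 NOP. */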
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
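/*
 * Illustrative note: the pair offset is stored as a scaled 7-bit signed
 * immediate, so e.g. STP X1, X2, [SP, #-16]! encodes imm7 = -16 >> 3 = -2.
 */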
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
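/*
 * Illustrative: PLDL1KEEP is the all-zeroes imm5, while PSTL2STRM encodes
 * as (0x2 << 3) | (0x1 << 1) | 0x1 == 0x13 in the Rt field.
 */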
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
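/*
 * Illustrative: imm == 0x45000 has nothing in its low 12 bits, so it is
 * encoded as imm12 = 0x45 with the LSL #12 flag set; imm == 0x45001 spans
 * both halves and is rejected.
 */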
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
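/*
 * Illustrative: the hw field at bits [22:21] holds shift / 16, which is
 * what (shift >> 4) << 21 computes; MOVZ X0, #1, LSL #32 therefore gets
 * hw == 2.
 */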
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
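/*
 * Illustrative note: each shift pair above sign-extends the immediate and
 * scales it by 4 in one go. For the 26-bit case, (imm << 6) moves the sign
 * bit to bit 31 of the s32, and the arithmetic >> 4 then sign-extends while
 * leaving a net multiplication by 4 (branch offsets are stored in words).
 */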
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}
/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}
#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}
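/*
 * Illustrative note: in PSTATE, N, Z, C and V sit at bits 31..28, so
 * (pstate >> 1) shifts Z onto C's bit position. "pstate &= ~(pstate >> 1)"
 * therefore clears C whenever Z is set, reducing HI (C && !Z) and
 * LS (!C || Z) to a single test of the C bit.
 */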
static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
)
1505 /* Doesn't handle full ones or full zeroes */
1506 u64 sval
= val
>> __ffs64(val
);
1508 /* One of Sean Eron Anderson's bithack tricks */
1509 return ((sval
+ 1) & (sval
)) == 0;
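/*
 * Illustrative: for val == 0b0111000, sval == 0b111 and (sval + 1) & sval
 * == 0, so the value is a single contiguous run of ones; 0b0101000 gives
 * sval == 0b101 and fails the test.
 */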
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	/* Can't encode full zeroes or full ones */
	if (!imm || !~imm)
		return AARCH64_BREAK_FAULT;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (upper_32_bits(imm))
			return AARCH64_BREAK_FAULT;
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	mask = GENMASK(esz - 1, 0);
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size.
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
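/*
 * Worked example (illustrative): imm == 0xf0f0f0f0f0f0f0f0 repeats with an
 * 8-bit stride, so esz == 8 and the element is 0xf0. ones == 4, giving
 * imms == 0x33 (the 110 size prefix followed by ones - 1); the element is
 * a contiguous run, so ror == 4 and immr == (8 - 4) % 8 == 4, and indeed
 * ROR(0b00001111, 4) == 0b11110000 replicated across the register.
 */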
u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}
u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}