/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
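
/*
 * For example, a NOP encodes as 0xd503201f: bits [28:25] are 0b1010, so the
 * table above classifies it as AARCH64_INSN_CLS_BR_SYS (HINT instructions
 * sit in the branch/exception/system encoding group).
 */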
/* NOP is an alias of HINT */
bool __kprobes aarch64_insn_is_nop(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_YIELD:
	case AARCH64_INSN_HINT_WFE:
	case AARCH64_INSN_HINT_WFI:
	case AARCH64_INSN_HINT_SEV:
	case AARCH64_INSN_HINT_SEVL:
		return false;
	default:
		return true;
	}
}
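
/*
 * Note the inverted logic above: only the wait/event hints are rejected.
 * HINT #0 (the architectural NOP) and any unallocated hint, which the
 * architecture requires to behave as a NOP, fall through to the default
 * case and are reported as NOPs.
 */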
bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}
static DEFINE_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
		page = virt_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	set_fixmap(fixmap, page_to_phys(page));

	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	u32 val;

	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
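
/*
 * A minimal sketch of the read side (illustrative only, unused elsewhere
 * in this file): fetch the current instruction at a code address and test
 * it before deciding how to patch.
 */
static bool __maybe_unused example_site_is_nop(void *addr)
{
	u32 insn;

	return !aarch64_insn_read(addr, &insn) && aarch64_insn_is_nop(insn);
}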
static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	insn = cpu_to_le32(insn);
	return __aarch64_insn_write(addr, insn);
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
{
	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
		return false;

	return	aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_svc(insn) ||
		aarch64_insn_is_hvc(insn) ||
		aarch64_insn_is_smc(insn) ||
		aarch64_insn_is_brk(insn) ||
		aarch64_insn_is_nop(insn);
}
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
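
/*
 * Illustrative check built on the helper above: swapping a NOP for a
 * direct branch keeps both the old and the new instruction inside the
 * white-list, so the single-instruction fast path in
 * aarch64_insn_patch_text() below applies.
 */
static bool __maybe_unused example_nop_to_branch_is_safe(u32 branch)
{
	return aarch64_insn_hotpatch_safe(aarch64_insn_gen_nop(), branch);
}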
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with a "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
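
/*
 * Usage sketch: patch a single site from its current instruction to an
 * unconditional branch. With cnt == 1 and a hotpatch-safe old/new pair this
 * takes the kick_all_cpus_sync() fast path above; anything else falls back
 * to stop_machine(). "site" and "target" are hypothetical word-aligned code
 * addresses supplied by the caller.
 */
static int __maybe_unused example_patch_branch(void *site, void *target)
{
	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)site,
					       (unsigned long)target,
					       AARCH64_INSN_BRANCH_NOLINK);

	return aarch64_insn_patch_text(&site, &insn, 1);
}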
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
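
/*
 * Worked ADR example: "adr x0, #8" encodes as 0x10000040, with immlo = 0 in
 * bits [30:29] and immhi = 2 in bits [23:5]. The AARCH64_INSN_IMM_ADR case
 * above reassembles (immhi << 2) | immlo = 8.
 */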
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return 0;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support a [-128M, 128M) offset; the arm64 virtual address
	 * arrangement guarantees all kernel and module text is within
	 * +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
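
/*
 * Sketch of a typical caller (ftrace-style): emit a "bl" from an
 * (illustrative) instruction slot to a handler. Both addresses must be
 * word-aligned and within +/-128M of each other, or branch_imm_common()
 * hits its BUG_ON().
 */
static u32 __maybe_unused example_gen_call(unsigned long pc,
					   unsigned long handler)
{
	return aarch64_insn_gen_branch_imm(pc, handler,
					   AARCH64_INSN_BRANCH_LINK);
}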
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be a multiple of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be a multiple of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
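
/*
 * Sketch: emit "stp x29, x30, [sp, #-16]!", the frame push a JIT prologue
 * would generate. The -16 offset satisfies the multiple-of-8, [-512, 504]
 * constraint enforced for the 64-bit variant above.
 */
static u32 __maybe_unused example_gen_push_frame(void)
{
	return aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_29,
						AARCH64_INSN_REG_30,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
}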
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_4K - 1));

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
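
/*
 * Sketch: materialize the (arbitrary) 32-bit constant 0x12345678 in w0 with
 * a movz/movk pair, 16 bits per instruction; the shift lands in the "hw"
 * field via the "(shift >> 4) << 21" step above.
 */
static void __maybe_unused example_gen_mov_const(u32 insn[2])
{
	insn[0] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x5678, 0,
					    AARCH64_INSN_VARIANT_32BIT,
					    AARCH64_INSN_MOVEWIDE_ZERO);
	insn[1] = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, 0x1234, 16,
					    AARCH64_INSN_VARIANT_32BIT,
					    AARCH64_INSN_MOVEWIDE_KEEP);
}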
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift & ~(SZ_32 - 1));
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift & ~(SZ_64 - 1));
		break;
	default:
		BUG_ON(1);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
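
/*
 * The paired shifts above sign-extend the immediate and convert words to
 * bytes in one step: for a 26-bit immediate, "imm << 6" puts the sign bit
 * at bit 31 and the arithmetic ">> 4" restores magnitude while multiplying
 * by 4. E.g. imm26 = 0x3fffffe (-2 words) gives (0x3fffffe << 6) >> 4 = -8.
 */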
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
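
/*
 * Sketch: retarget an existing immediate branch by a hypothetical "delta"
 * of bytes, e.g. when relocating a probed instruction; the decode/encode
 * pair round-trips through the two helpers above.
 */
static u32 __maybe_unused example_retarget_branch(u32 insn, s32 delta)
{
	return aarch64_set_branch_offset(insn,
					 aarch64_get_branch_offset(insn) + delta);
}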
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}