/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}

static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
    s->is_jmp = DISAS_EXC;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

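/* Note on the flag variables: the cpu_*F globals hold the flags in a
 * partially-computed form.  NF and VF keep the N and V flags in bit 31
 * (other bits are don't-care), ZF holds a value that is zero iff the
 * Z flag is set, and CF holds the carry as 0 or 1.  That is why
 * gen_logic_CC can simply copy the result into NF and ZF.
 */
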
/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

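/* The signed-overflow computation above follows the usual identity:
 * for res = a + b, V = (res ^ a) & ~(a ^ b), taken in bit 31 -- i.e.
 * overflow occurs iff the operands have the same sign and the result's
 * sign differs.  The add2 op computes the 33-bit sum, so the carry-out
 * lands directly in cpu_CF.
 */
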
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

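/* Subtract-with-carry reuses the adder: ARM defines SBC as
 * T0 + NOT(T1) + CF (carry doubles as "not borrow"), so inverting t1
 * and feeding it to gen_adc_CC yields both the result and the flags.
 */
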
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

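/* The movconds above implement the ARM shift-by-register semantics:
 * only the bottom byte of the shift register is significant.  For
 * LSL/LSR a count greater than 31 must yield 0 (the movcond selects a
 * zero source operand), while for ASR any count greater than 31
 * behaves like a shift by 31 (the count is clamped), replicating the
 * sign bit across the result.
 */
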
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

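/* A note on the shift == 0 special cases above: in the ARM
 * immediate-shift encoding, "LSR #0" and "ASR #0" actually encode
 * LSR #32 and ASR #32, and "ROR #0" encodes RRX (rotate right by one
 * through the carry flag), which is why case 3 shifts cpu_CF into
 * bit 31 of the result.
 */
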
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

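/* Both helpers above decode the parallel add/subtract "op" field into
 * the same six operations (add16, addsubx, subaddx, sub16, add8, sub8),
 * with the prefix (s/q/sh vs u/uq/uh) selecting the signed/unsigned and
 * saturating/halving variants; only the field-to-operation numbering
 * differs between the ARM and Thumb-2 encodings.
 */
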
/*
 * Generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    TCGv_i32 tmp;
    TCGLabel *inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}

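/* For the signed conditions (ge/lt/gt/le) the code above never needs
 * to materialize full flag values: since N and V live in bit 31 of NF
 * and VF, "N != V" is equivalent to "(NF ^ VF) < 0" as a signed 32-bit
 * comparison, so a single xor plus a signed branch suffices.
 */
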
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

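/* table_logic_cc flags which data-processing ops are "logical": for
 * those (and/eor/tst/teq/orr/mov/bic/mvn) an S-suffixed instruction
 * sets only N and Z from the result (C comes from the shifter), while
 * the arithmetic ops compute the full NZCV set via the gen_*_CC
 * helpers above.
 */
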
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC);                          \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_st_i32(val, addr, index, OPC);                          \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC);                        \
    tcg_temp_free(addr64);                                               \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)

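/* The MO_* memop constants name the access size and endianness:
 * MO_UB/MO_SB are unsigned/signed bytes, MO_TEUW/MO_TESW are
 * target-endian unsigned/signed 16-bit accesses, MO_TEUL is a
 * target-endian 32-bit access and MO_TEQ a 64-bit one, so e.g.
 * gen_aa32_ld16s expands to a sign-extending halfword load.
 */
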
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (i.e. before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn);
    s->is_jmp = DISAS_JUMP;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

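/* Decode notes for the two offset helpers above: bit 25 (word/byte
 * forms) and bit 22 (halfword forms) select immediate vs register
 * offset, bit 23 is the U (up/down) bit choosing add or subtract, and
 * the halfword-form immediate is split across insn[3:0] and
 * insn[11:8].
 */
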
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                    \
static inline void gen_vfp_##name(int dp, int neon)           \
{                                                             \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);              \
    if (dp) {                                                 \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else {                                                  \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    }                                                         \
    tcg_temp_free_ptr(statusptr);                             \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                    \
static inline void gen_vfp_##name(int dp, int neon)           \
{                                                             \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);              \
    if (dp) {                                                 \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else {                                                  \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    }                                                         \
    tcg_temp_free_ptr(statusptr);                             \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                              \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{                                                             \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);              \
    if (dp) {                                                 \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr);           \
    } else {                                                  \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr);           \
    }                                                         \
    tcg_temp_free_i32(tmp_shift);                             \
    tcg_temp_free_ptr(statusptr);                             \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

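/* Register-file aliasing: a NEON/VFP double register Dn overlays two
 * single registers, with S(2n) in the low 32 bits and S(2n+1) in the
 * high 32 bits, which is what the (reg & 1) test in vfp_reg_offset
 * and the "reg * 2 + n" mapping in neon_reg_offset implement.
 */
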
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

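/* The iwMMXt load/store addressing above mirrors the usual ARM
 * addressing modes: bit 24 selects pre- vs post-indexed, bit 23 is the
 * up/down bit, bit 21 requests base writeback, and the 8-bit immediate
 * is scaled by 4 when insn bit 8 is set (the "(insn >> 7) & 2" shift).
 */
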
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) { /* WLDRH */
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                    } else { /* WLDRB */
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
2034 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2035 wrd = (insn >> 12) & 0xf;
2036 rd0 = (insn >> 16) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 switch ((insn >> 22) & 3) {
2039 case 0:
2040 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
2041 break;
2042 case 1:
2043 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
2044 break;
2045 case 2:
2046 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
2047 break;
2048 case 3:
2049 return 1;
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 break;
2054 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
2055 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
2056 return 1;
2057 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
2058 tmp2 = tcg_temp_new_i32();
2059 tcg_gen_mov_i32(tmp2, tmp);
2060 switch ((insn >> 22) & 3) {
2061 case 0:
2062 for (i = 0; i < 7; i ++) {
2063 tcg_gen_shli_i32(tmp2, tmp2, 4);
2064 tcg_gen_or_i32(tmp, tmp, tmp2);
2066 break;
2067 case 1:
2068 for (i = 0; i < 3; i ++) {
2069 tcg_gen_shli_i32(tmp2, tmp2, 8);
2070 tcg_gen_or_i32(tmp, tmp, tmp2);
2072 break;
2073 case 2:
2074 tcg_gen_shli_i32(tmp2, tmp2, 16);
2075 tcg_gen_or_i32(tmp, tmp, tmp2);
2076 break;
2078 gen_set_nzcv(tmp);
2079 tcg_temp_free_i32(tmp2);
2080 tcg_temp_free_i32(tmp);
2081 break;
2082 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2083 rd = (insn >> 12) & 0xf;
2084 rd0 = (insn >> 16) & 0xf;
2085 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
2086 return 1;
2087 gen_op_iwmmxt_movq_M0_wRn(rd0);
2088 tmp = tcg_temp_new_i32();
2089 switch ((insn >> 22) & 3) {
2090 case 0:
2091 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
2092 break;
2093 case 1:
2094 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
2095 break;
2096 case 2:
2097 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
2098 break;
2099 }
2100 store_reg(s, rd, tmp);
2101 break;
2102 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2103 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2104 wrd = (insn >> 12) & 0xf;
2105 rd0 = (insn >> 16) & 0xf;
2106 rd1 = (insn >> 0) & 0xf;
2107 gen_op_iwmmxt_movq_M0_wRn(rd0);
2108 switch ((insn >> 22) & 3) {
2109 case 0:
2110 if (insn & (1 << 21))
2111 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2112 else
2113 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2114 break;
2115 case 1:
2116 if (insn & (1 << 21))
2117 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2118 else
2119 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2120 break;
2121 case 2:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2124 else
2125 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2126 break;
2127 case 3:
2128 return 1;
2129 }
2130 gen_op_iwmmxt_movq_wRn_M0(wrd);
2131 gen_op_iwmmxt_set_mup();
2132 gen_op_iwmmxt_set_cup();
2133 break;
2134 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2135 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 gen_op_iwmmxt_movq_M0_wRn(rd0);
2139 switch ((insn >> 22) & 3) {
2140 case 0:
2141 if (insn & (1 << 21))
2142 gen_op_iwmmxt_unpacklsb_M0();
2143 else
2144 gen_op_iwmmxt_unpacklub_M0();
2145 break;
2146 case 1:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_unpacklsw_M0();
2149 else
2150 gen_op_iwmmxt_unpackluw_M0();
2151 break;
2152 case 2:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_unpacklsl_M0();
2155 else
2156 gen_op_iwmmxt_unpacklul_M0();
2157 break;
2158 case 3:
2159 return 1;
2160 }
2161 gen_op_iwmmxt_movq_wRn_M0(wrd);
2162 gen_op_iwmmxt_set_mup();
2163 gen_op_iwmmxt_set_cup();
2164 break;
2165 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2166 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2167 wrd = (insn >> 12) & 0xf;
2168 rd0 = (insn >> 16) & 0xf;
2169 gen_op_iwmmxt_movq_M0_wRn(rd0);
2170 switch ((insn >> 22) & 3) {
2171 case 0:
2172 if (insn & (1 << 21))
2173 gen_op_iwmmxt_unpackhsb_M0();
2174 else
2175 gen_op_iwmmxt_unpackhub_M0();
2176 break;
2177 case 1:
2178 if (insn & (1 << 21))
2179 gen_op_iwmmxt_unpackhsw_M0();
2180 else
2181 gen_op_iwmmxt_unpackhuw_M0();
2182 break;
2183 case 2:
2184 if (insn & (1 << 21))
2185 gen_op_iwmmxt_unpackhsl_M0();
2186 else
2187 gen_op_iwmmxt_unpackhul_M0();
2188 break;
2189 case 3:
2190 return 1;
2191 }
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 gen_op_iwmmxt_set_cup();
2195 break;
2196 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2197 case 0x214: case 0x614: case 0xa14: case 0xe14:
2198 if (((insn >> 22) & 3) == 0)
2199 return 1;
2200 wrd = (insn >> 12) & 0xf;
2201 rd0 = (insn >> 16) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 tmp = tcg_temp_new_i32();
2204 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2205 tcg_temp_free_i32(tmp);
2206 return 1;
2207 }
2208 switch ((insn >> 22) & 3) {
2209 case 1:
2210 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2211 break;
2212 case 2:
2213 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2214 break;
2215 case 3:
2216 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2217 break;
2218 }
2219 tcg_temp_free_i32(tmp);
2220 gen_op_iwmmxt_movq_wRn_M0(wrd);
2221 gen_op_iwmmxt_set_mup();
2222 gen_op_iwmmxt_set_cup();
2223 break;
2224 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2225 case 0x014: case 0x414: case 0x814: case 0xc14:
2226 if (((insn >> 22) & 3) == 0)
2227 return 1;
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
2231 tmp = tcg_temp_new_i32();
2232 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2233 tcg_temp_free_i32(tmp);
2234 return 1;
2235 }
2236 switch ((insn >> 22) & 3) {
2237 case 1:
2238 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2239 break;
2240 case 2:
2241 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2242 break;
2243 case 3:
2244 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2245 break;
2246 }
2247 tcg_temp_free_i32(tmp);
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2253 case 0x114: case 0x514: case 0x914: case 0xd14:
2254 if (((insn >> 22) & 3) == 0)
2255 return 1;
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 tmp = tcg_temp_new_i32();
2260 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2261 tcg_temp_free_i32(tmp);
2262 return 1;
2263 }
2264 switch ((insn >> 22) & 3) {
2265 case 1:
2266 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2267 break;
2268 case 2:
2269 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2270 break;
2271 case 3:
2272 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2273 break;
2274 }
2275 tcg_temp_free_i32(tmp);
2276 gen_op_iwmmxt_movq_wRn_M0(wrd);
2277 gen_op_iwmmxt_set_mup();
2278 gen_op_iwmmxt_set_cup();
2279 break;
2280 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2281 case 0x314: case 0x714: case 0xb14: case 0xf14:
2282 if (((insn >> 22) & 3) == 0)
2283 return 1;
2284 wrd = (insn >> 12) & 0xf;
2285 rd0 = (insn >> 16) & 0xf;
2286 gen_op_iwmmxt_movq_M0_wRn(rd0);
2287 tmp = tcg_temp_new_i32();
2288 switch ((insn >> 22) & 3) {
2289 case 1:
2290 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2291 tcg_temp_free_i32(tmp);
2292 return 1;
2293 }
2294 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2295 break;
2296 case 2:
2297 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2298 tcg_temp_free_i32(tmp);
2299 return 1;
2300 }
2301 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2302 break;
2303 case 3:
2304 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2305 tcg_temp_free_i32(tmp);
2306 return 1;
2307 }
2308 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2309 break;
2310 }
2311 tcg_temp_free_i32(tmp);
2312 gen_op_iwmmxt_movq_wRn_M0(wrd);
2313 gen_op_iwmmxt_set_mup();
2314 gen_op_iwmmxt_set_cup();
2315 break;
2316 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2317 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2318 wrd = (insn >> 12) & 0xf;
2319 rd0 = (insn >> 16) & 0xf;
2320 rd1 = (insn >> 0) & 0xf;
2321 gen_op_iwmmxt_movq_M0_wRn(rd0);
2322 switch ((insn >> 22) & 3) {
2323 case 0:
2324 if (insn & (1 << 21))
2325 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2326 else
2327 gen_op_iwmmxt_minub_M0_wRn(rd1);
2328 break;
2329 case 1:
2330 if (insn & (1 << 21))
2331 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2332 else
2333 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2334 break;
2335 case 2:
2336 if (insn & (1 << 21))
2337 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2338 else
2339 gen_op_iwmmxt_minul_M0_wRn(rd1);
2340 break;
2341 case 3:
2342 return 1;
2343 }
2344 gen_op_iwmmxt_movq_wRn_M0(wrd);
2345 gen_op_iwmmxt_set_mup();
2346 break;
2347 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2348 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2349 wrd = (insn >> 12) & 0xf;
2350 rd0 = (insn >> 16) & 0xf;
2351 rd1 = (insn >> 0) & 0xf;
2352 gen_op_iwmmxt_movq_M0_wRn(rd0);
2353 switch ((insn >> 22) & 3) {
2354 case 0:
2355 if (insn & (1 << 21))
2356 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2357 else
2358 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2359 break;
2360 case 1:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2365 break;
2366 case 2:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2371 break;
2372 case 3:
2373 return 1;
2374 }
2375 gen_op_iwmmxt_movq_wRn_M0(wrd);
2376 gen_op_iwmmxt_set_mup();
2377 break;
2378 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2379 case 0x402: case 0x502: case 0x602: case 0x702:
2380 wrd = (insn >> 12) & 0xf;
2381 rd0 = (insn >> 16) & 0xf;
2382 rd1 = (insn >> 0) & 0xf;
2383 gen_op_iwmmxt_movq_M0_wRn(rd0);
2384 tmp = tcg_const_i32((insn >> 20) & 3);
2385 iwmmxt_load_reg(cpu_V1, rd1);
2386 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2387 tcg_temp_free_i32(tmp);
2388 gen_op_iwmmxt_movq_wRn_M0(wrd);
2389 gen_op_iwmmxt_set_mup();
2390 break;
2391 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2392 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2393 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2394 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2395 wrd = (insn >> 12) & 0xf;
2396 rd0 = (insn >> 16) & 0xf;
2397 rd1 = (insn >> 0) & 0xf;
2398 gen_op_iwmmxt_movq_M0_wRn(rd0);
2399 switch ((insn >> 20) & 0xf) {
2400 case 0x0:
2401 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2402 break;
2403 case 0x1:
2404 gen_op_iwmmxt_subub_M0_wRn(rd1);
2405 break;
2406 case 0x3:
2407 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2408 break;
2409 case 0x4:
2410 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2411 break;
2412 case 0x5:
2413 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2414 break;
2415 case 0x7:
2416 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2417 break;
2418 case 0x8:
2419 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2420 break;
2421 case 0x9:
2422 gen_op_iwmmxt_subul_M0_wRn(rd1);
2423 break;
2424 case 0xb:
2425 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2426 break;
2427 default:
2428 return 1;
2429 }
2430 gen_op_iwmmxt_movq_wRn_M0(wrd);
2431 gen_op_iwmmxt_set_mup();
2432 gen_op_iwmmxt_set_cup();
2433 break;
2434 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2435 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2436 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2437 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2438 wrd = (insn >> 12) & 0xf;
2439 rd0 = (insn >> 16) & 0xf;
2440 gen_op_iwmmxt_movq_M0_wRn(rd0);
2441 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2442 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2443 tcg_temp_free_i32(tmp);
2444 gen_op_iwmmxt_movq_wRn_M0(wrd);
2445 gen_op_iwmmxt_set_mup();
2446 gen_op_iwmmxt_set_cup();
2447 break;
2448 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2449 case 0x418: case 0x518: case 0x618: case 0x718:
2450 case 0x818: case 0x918: case 0xa18: case 0xb18:
2451 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2452 wrd = (insn >> 12) & 0xf;
2453 rd0 = (insn >> 16) & 0xf;
2454 rd1 = (insn >> 0) & 0xf;
2455 gen_op_iwmmxt_movq_M0_wRn(rd0);
2456 switch ((insn >> 20) & 0xf) {
2457 case 0x0:
2458 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2459 break;
2460 case 0x1:
2461 gen_op_iwmmxt_addub_M0_wRn(rd1);
2462 break;
2463 case 0x3:
2464 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2465 break;
2466 case 0x4:
2467 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2468 break;
2469 case 0x5:
2470 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2471 break;
2472 case 0x7:
2473 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2474 break;
2475 case 0x8:
2476 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2477 break;
2478 case 0x9:
2479 gen_op_iwmmxt_addul_M0_wRn(rd1);
2480 break;
2481 case 0xb:
2482 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2483 break;
2484 default:
2485 return 1;
2486 }
2487 gen_op_iwmmxt_movq_wRn_M0(wrd);
2488 gen_op_iwmmxt_set_mup();
2489 gen_op_iwmmxt_set_cup();
2490 break;
2491 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2492 case 0x408: case 0x508: case 0x608: case 0x708:
2493 case 0x808: case 0x908: case 0xa08: case 0xb08:
2494 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2495 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2496 return 1;
2497 wrd = (insn >> 12) & 0xf;
2498 rd0 = (insn >> 16) & 0xf;
2499 rd1 = (insn >> 0) & 0xf;
2500 gen_op_iwmmxt_movq_M0_wRn(rd0);
2501 switch ((insn >> 22) & 3) {
2502 case 1:
2503 if (insn & (1 << 21))
2504 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2505 else
2506 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2507 break;
2508 case 2:
2509 if (insn & (1 << 21))
2510 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2511 else
2512 gen_op_iwmmxt_packul_M0_wRn(rd1);
2513 break;
2514 case 3:
2515 if (insn & (1 << 21))
2516 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2517 else
2518 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2519 break;
2520 }
2521 gen_op_iwmmxt_movq_wRn_M0(wrd);
2522 gen_op_iwmmxt_set_mup();
2523 gen_op_iwmmxt_set_cup();
2524 break;
2525 case 0x201: case 0x203: case 0x205: case 0x207:
2526 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2527 case 0x211: case 0x213: case 0x215: case 0x217:
2528 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2529 wrd = (insn >> 5) & 0xf;
2530 rd0 = (insn >> 12) & 0xf;
2531 rd1 = (insn >> 0) & 0xf;
2532 if (rd0 == 0xf || rd1 == 0xf)
2533 return 1;
2534 gen_op_iwmmxt_movq_M0_wRn(wrd);
2535 tmp = load_reg(s, rd0);
2536 tmp2 = load_reg(s, rd1);
2537 switch ((insn >> 16) & 0xf) {
2538 case 0x0: /* TMIA */
2539 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2540 break;
2541 case 0x8: /* TMIAPH */
2542 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2543 break;
2544 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2545 if (insn & (1 << 16))
2546 tcg_gen_shri_i32(tmp, tmp, 16);
2547 if (insn & (1 << 17))
2548 tcg_gen_shri_i32(tmp2, tmp2, 16);
2549 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2550 break;
2551 default:
2552 tcg_temp_free_i32(tmp2);
2553 tcg_temp_free_i32(tmp);
2554 return 1;
2555 }
2556 tcg_temp_free_i32(tmp2);
2557 tcg_temp_free_i32(tmp);
2558 gen_op_iwmmxt_movq_wRn_M0(wrd);
2559 gen_op_iwmmxt_set_mup();
2560 break;
2561 default:
2562 return 1;
2563 }
2565 return 0;
2566 }
2568 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2569 (i.e. an undefined instruction). */
2570 static int disas_dsp_insn(DisasContext *s, uint32_t insn)
2571 {
2572 int acc, rd0, rd1, rdhi, rdlo;
2573 TCGv_i32 tmp, tmp2;
2575 if ((insn & 0x0ff00f10) == 0x0e200010) {
2576 /* Multiply with Internal Accumulate Format */
2577 rd0 = (insn >> 12) & 0xf;
2578 rd1 = insn & 0xf;
2579 acc = (insn >> 5) & 7;
2581 if (acc != 0)
2582 return 1;
2584 tmp = load_reg(s, rd0);
2585 tmp2 = load_reg(s, rd1);
2586 switch ((insn >> 16) & 0xf) {
2587 case 0x0: /* MIA */
2588 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2589 break;
2590 case 0x8: /* MIAPH */
2591 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2592 break;
2593 case 0xc: /* MIABB */
2594 case 0xd: /* MIABT */
2595 case 0xe: /* MIATB */
2596 case 0xf: /* MIATT */
2597 if (insn & (1 << 16))
2598 tcg_gen_shri_i32(tmp, tmp, 16);
2599 if (insn & (1 << 17))
2600 tcg_gen_shri_i32(tmp2, tmp2, 16);
2601 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2602 break;
2603 default:
2604 return 1;
2605 }
2606 tcg_temp_free_i32(tmp2);
2607 tcg_temp_free_i32(tmp);
2609 gen_op_iwmmxt_movq_wRn_M0(acc);
2610 return 0;
2611 }
2613 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2614 /* Internal Accumulator Access Format */
2615 rdhi = (insn >> 16) & 0xf;
2616 rdlo = (insn >> 12) & 0xf;
2617 acc = insn & 7;
2619 if (acc != 0)
2620 return 1;
2622 if (insn & ARM_CP_RW_BIT) { /* MRA */
2623 iwmmxt_load_reg(cpu_V0, acc);
2624 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2625 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2626 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2627 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2628 } else { /* MAR */
2629 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2630 iwmmxt_store_reg(cpu_V0, acc);
2631 }
2632 return 0;
2633 }
2635 return 1;
2636 }
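/* The XScale internal accumulator acc0 is only 40 bits wide, which is
 * why the MRA path above truncates rdhi with (1 << (40 - 32)) - 1,
 * keeping just the low eight bits of the high word.
 */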
2638 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2639 #define VFP_SREG(insn, bigbit, smallbit) \
2640 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2641 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2642 if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
2643 reg = (((insn) >> (bigbit)) & 0x0f) \
2644 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2645 } else { \
2646 if (insn & (1 << (smallbit))) \
2647 return 1; \
2648 reg = ((insn) >> (bigbit)) & 0x0f; \
2649 }} while (0)
2651 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2652 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2653 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2654 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2655 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2656 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
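/* Worked example of the decoding above: VFP_SREG_D concatenates the Vd
 * field (insn[15:12]) with the D bit (insn[22]) as the low bit, so
 * Vd = 0b0011 with D = 1 names s7 ((3 << 1) | 1). VFP_DREG_D instead
 * treats insn[22] as the high bit of the register number, which is why
 * cores without VFP3 (only 16 double registers) take the UNDEF path
 * when that bit is set.
 */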
2658 /* Move between integer and VFP cores. */
2659 static TCGv_i32 gen_vfp_mrs(void)
2660 {
2661 TCGv_i32 tmp = tcg_temp_new_i32();
2662 tcg_gen_mov_i32(tmp, cpu_F0s);
2663 return tmp;
2664 }
2666 static void gen_vfp_msr(TCGv_i32 tmp)
2667 {
2668 tcg_gen_mov_i32(cpu_F0s, tmp);
2669 tcg_temp_free_i32(tmp);
2670 }
2672 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2673 {
2674 TCGv_i32 tmp = tcg_temp_new_i32();
2675 if (shift)
2676 tcg_gen_shri_i32(var, var, shift);
2677 tcg_gen_ext8u_i32(var, var);
2678 tcg_gen_shli_i32(tmp, var, 8);
2679 tcg_gen_or_i32(var, var, tmp);
2680 tcg_gen_shli_i32(tmp, var, 16);
2681 tcg_gen_or_i32(var, var, tmp);
2682 tcg_temp_free_i32(tmp);
2683 }
2685 static void gen_neon_dup_low16(TCGv_i32 var)
2686 {
2687 TCGv_i32 tmp = tcg_temp_new_i32();
2688 tcg_gen_ext16u_i32(var, var);
2689 tcg_gen_shli_i32(tmp, var, 16);
2690 tcg_gen_or_i32(var, var, tmp);
2691 tcg_temp_free_i32(tmp);
2692 }
2694 static void gen_neon_dup_high16(TCGv_i32 var)
2695 {
2696 TCGv_i32 tmp = tcg_temp_new_i32();
2697 tcg_gen_andi_i32(var, var, 0xffff0000);
2698 tcg_gen_shri_i32(tmp, var, 16);
2699 tcg_gen_or_i32(var, var, tmp);
2700 tcg_temp_free_i32(tmp);
2701 }
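/* These duplicators splat one element across a 32-bit value, e.g.
 * gen_neon_dup_u8(var, 0) turns 0x000000ab into 0xabababab and
 * gen_neon_dup_low16 turns 0x0000abcd into 0xabcdabcd;
 * gen_neon_dup_high16 does the same starting from the upper halfword.
 */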
2703 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2704 {
2705 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2706 TCGv_i32 tmp = tcg_temp_new_i32();
2707 switch (size) {
2708 case 0:
2709 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2710 gen_neon_dup_u8(tmp, 0);
2711 break;
2712 case 1:
2713 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2714 gen_neon_dup_low16(tmp);
2715 break;
2716 case 2:
2717 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2718 break;
2719 default: /* Avoid compiler warnings. */
2720 abort();
2721 }
2722 return tmp;
2723 }
2725 static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2726 uint32_t dp)
2727 {
2728 uint32_t cc = extract32(insn, 20, 2);
2730 if (dp) {
2731 TCGv_i64 frn, frm, dest;
2732 TCGv_i64 tmp, zero, zf, nf, vf;
2734 zero = tcg_const_i64(0);
2736 frn = tcg_temp_new_i64();
2737 frm = tcg_temp_new_i64();
2738 dest = tcg_temp_new_i64();
2740 zf = tcg_temp_new_i64();
2741 nf = tcg_temp_new_i64();
2742 vf = tcg_temp_new_i64();
2744 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2745 tcg_gen_ext_i32_i64(nf, cpu_NF);
2746 tcg_gen_ext_i32_i64(vf, cpu_VF);
2748 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2749 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2750 switch (cc) {
2751 case 0: /* eq: Z */
2752 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2753 frn, frm);
2754 break;
2755 case 1: /* vs: V */
2756 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2757 frn, frm);
2758 break;
2759 case 2: /* ge: N == V -> N ^ V == 0 */
2760 tmp = tcg_temp_new_i64();
2761 tcg_gen_xor_i64(tmp, vf, nf);
2762 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2763 frn, frm);
2764 tcg_temp_free_i64(tmp);
2765 break;
2766 case 3: /* gt: !Z && N == V */
2767 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2768 frn, frm);
2769 tmp = tcg_temp_new_i64();
2770 tcg_gen_xor_i64(tmp, vf, nf);
2771 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2772 dest, frm);
2773 tcg_temp_free_i64(tmp);
2774 break;
2775 }
2776 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2777 tcg_temp_free_i64(frn);
2778 tcg_temp_free_i64(frm);
2779 tcg_temp_free_i64(dest);
2781 tcg_temp_free_i64(zf);
2782 tcg_temp_free_i64(nf);
2783 tcg_temp_free_i64(vf);
2785 tcg_temp_free_i64(zero);
2786 } else {
2787 TCGv_i32 frn, frm, dest;
2788 TCGv_i32 tmp, zero;
2790 zero = tcg_const_i32(0);
2792 frn = tcg_temp_new_i32();
2793 frm = tcg_temp_new_i32();
2794 dest = tcg_temp_new_i32();
2795 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2796 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2797 switch (cc) {
2798 case 0: /* eq: Z */
2799 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2800 frn, frm);
2801 break;
2802 case 1: /* vs: V */
2803 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2804 frn, frm);
2805 break;
2806 case 2: /* ge: N == V -> N ^ V == 0 */
2807 tmp = tcg_temp_new_i32();
2808 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2809 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2810 frn, frm);
2811 tcg_temp_free_i32(tmp);
2812 break;
2813 case 3: /* gt: !Z && N == V */
2814 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2815 frn, frm);
2816 tmp = tcg_temp_new_i32();
2817 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2818 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2819 dest, frm);
2820 tcg_temp_free_i32(tmp);
2821 break;
2822 }
2823 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2824 tcg_temp_free_i32(frn);
2825 tcg_temp_free_i32(frm);
2826 tcg_temp_free_i32(dest);
2828 tcg_temp_free_i32(zero);
2829 }
2831 return 0;
2832 }
2834 static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2835 uint32_t rm, uint32_t dp)
2836 {
2837 uint32_t vmin = extract32(insn, 6, 1);
2838 TCGv_ptr fpst = get_fpstatus_ptr(0);
2840 if (dp) {
2841 TCGv_i64 frn, frm, dest;
2843 frn = tcg_temp_new_i64();
2844 frm = tcg_temp_new_i64();
2845 dest = tcg_temp_new_i64();
2847 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2848 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2849 if (vmin) {
2850 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
2851 } else {
2852 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
2853 }
2854 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2855 tcg_temp_free_i64(frn);
2856 tcg_temp_free_i64(frm);
2857 tcg_temp_free_i64(dest);
2858 } else {
2859 TCGv_i32 frn, frm, dest;
2861 frn = tcg_temp_new_i32();
2862 frm = tcg_temp_new_i32();
2863 dest = tcg_temp_new_i32();
2865 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2866 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2867 if (vmin) {
2868 gen_helper_vfp_minnums(dest, frn, frm, fpst);
2869 } else {
2870 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
2871 }
2872 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2873 tcg_temp_free_i32(frn);
2874 tcg_temp_free_i32(frm);
2875 tcg_temp_free_i32(dest);
2876 }
2878 tcg_temp_free_ptr(fpst);
2879 return 0;
2880 }
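/* VMINNM/VMAXNM follow the IEEE 754-2008 minNum/maxNum semantics
 * implemented by the *num helpers: when exactly one input is a quiet
 * NaN, the other (numerical) operand is returned rather than a NaN.
 */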
2882 static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2883 int rounding)
2884 {
2885 TCGv_ptr fpst = get_fpstatus_ptr(0);
2886 TCGv_i32 tcg_rmode;
2888 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2889 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2891 if (dp) {
2892 TCGv_i64 tcg_op;
2893 TCGv_i64 tcg_res;
2894 tcg_op = tcg_temp_new_i64();
2895 tcg_res = tcg_temp_new_i64();
2896 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2897 gen_helper_rintd(tcg_res, tcg_op, fpst);
2898 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2899 tcg_temp_free_i64(tcg_op);
2900 tcg_temp_free_i64(tcg_res);
2901 } else {
2902 TCGv_i32 tcg_op;
2903 TCGv_i32 tcg_res;
2904 tcg_op = tcg_temp_new_i32();
2905 tcg_res = tcg_temp_new_i32();
2906 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2907 gen_helper_rints(tcg_res, tcg_op, fpst);
2908 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2909 tcg_temp_free_i32(tcg_op);
2910 tcg_temp_free_i32(tcg_res);
2911 }
2913 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2914 tcg_temp_free_i32(tcg_rmode);
2916 tcg_temp_free_ptr(fpst);
2917 return 0;
2918 }
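/* gen_helper_set_rmode writes the requested rounding mode into the FP
 * status and hands back the previous mode in tcg_rmode, so the second
 * call above restores whatever FPSCR rounding mode was in force before
 * the directed-rounding operation.
 */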
2920 static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2921 int rounding)
2922 {
2923 bool is_signed = extract32(insn, 7, 1);
2924 TCGv_ptr fpst = get_fpstatus_ptr(0);
2925 TCGv_i32 tcg_rmode, tcg_shift;
2927 tcg_shift = tcg_const_i32(0);
2929 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2930 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2932 if (dp) {
2933 TCGv_i64 tcg_double, tcg_res;
2934 TCGv_i32 tcg_tmp;
2935 /* Rd is encoded as a single precision register even when the source
2936 * is double precision.
2937 */
2938 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2939 tcg_double = tcg_temp_new_i64();
2940 tcg_res = tcg_temp_new_i64();
2941 tcg_tmp = tcg_temp_new_i32();
2942 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
2943 if (is_signed) {
2944 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
2945 } else {
2946 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
2947 }
2948 tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
2949 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
2950 tcg_temp_free_i32(tcg_tmp);
2951 tcg_temp_free_i64(tcg_res);
2952 tcg_temp_free_i64(tcg_double);
2953 } else {
2954 TCGv_i32 tcg_single, tcg_res;
2955 tcg_single = tcg_temp_new_i32();
2956 tcg_res = tcg_temp_new_i32();
2957 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
2958 if (is_signed) {
2959 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
2960 } else {
2961 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
2962 }
2963 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
2964 tcg_temp_free_i32(tcg_res);
2965 tcg_temp_free_i32(tcg_single);
2966 }
2968 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2969 tcg_temp_free_i32(tcg_rmode);
2971 tcg_temp_free_i32(tcg_shift);
2973 tcg_temp_free_ptr(fpst);
2975 return 0;
2976 }
2978 /* Table for converting the most common AArch32 encoding of
2979 * rounding mode to arm_fprounding order (which matches the
2980 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
2981 */
2982 static const uint8_t fp_decode_rm[] = {
2983 FPROUNDING_TIEAWAY,
2984 FPROUNDING_TIEEVEN,
2985 FPROUNDING_POSINF,
2986 FPROUNDING_NEGINF,
2987 };
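/* The callers below index this table with the two-bit RM field taken
 * from insn[17:16], e.g. RM == 0b10 selects FPROUNDING_POSINF, giving
 * the round-towards-plus-infinity behaviour of VRINTP/VCVTP.
 */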
2989 static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
2990 {
2991 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2993 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
2994 return 1;
2995 }
2997 if (dp) {
2998 VFP_DREG_D(rd, insn);
2999 VFP_DREG_N(rn, insn);
3000 VFP_DREG_M(rm, insn);
3001 } else {
3002 rd = VFP_SREG_D(insn);
3003 rn = VFP_SREG_N(insn);
3004 rm = VFP_SREG_M(insn);
3005 }
3007 if ((insn & 0x0f800e50) == 0x0e000a00) {
3008 return handle_vsel(insn, rd, rn, rm, dp);
3009 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3010 return handle_vminmaxnm(insn, rd, rn, rm, dp);
3011 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3012 /* VRINTA, VRINTN, VRINTP, VRINTM */
3013 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3014 return handle_vrint(insn, rd, rm, dp, rounding);
3015 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3016 /* VCVTA, VCVTN, VCVTP, VCVTM */
3017 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3018 return handle_vcvt(insn, rd, rm, dp, rounding);
3019 }
3020 return 1;
3021 }
3023 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
3024 (i.e. an undefined instruction). */
3025 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
3026 {
3027 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3028 int dp, veclen;
3029 TCGv_i32 addr;
3030 TCGv_i32 tmp;
3031 TCGv_i32 tmp2;
3033 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
3034 return 1;
3035 }
3037 /* FIXME: this access check should not take precedence over UNDEF
3038 * for invalid encodings; we will generate incorrect syndrome information
3039 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3040 */
3041 if (!s->cpacr_fpen) {
3042 gen_exception_insn(s, 4, EXCP_UDEF,
3043 syn_fp_access_trap(1, 0xe, s->thumb));
3044 return 0;
3045 }
3047 if (!s->vfp_enabled) {
3048 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
3049 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3050 return 1;
3051 rn = (insn >> 16) & 0xf;
3052 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3053 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
3054 return 1;
3055 }
3056 }
3058 if (extract32(insn, 28, 4) == 0xf) {
3059 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3060 * only used in v8 and above.
3061 */
3062 return disas_vfp_v8_insn(s, insn);
3063 }
3065 dp = ((insn & 0xf00) == 0xb00);
3066 switch ((insn >> 24) & 0xf) {
3067 case 0xe:
3068 if (insn & (1 << 4)) {
3069 /* single register transfer */
3070 rd = (insn >> 12) & 0xf;
3071 if (dp) {
3072 int size;
3073 int pass;
3075 VFP_DREG_N(rn, insn);
3076 if (insn & 0xf)
3077 return 1;
3078 if (insn & 0x00c00060
3079 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
3080 return 1;
3081 }
3083 pass = (insn >> 21) & 1;
3084 if (insn & (1 << 22)) {
3085 size = 0;
3086 offset = ((insn >> 5) & 3) * 8;
3087 } else if (insn & (1 << 5)) {
3088 size = 1;
3089 offset = (insn & (1 << 6)) ? 16 : 0;
3090 } else {
3091 size = 2;
3092 offset = 0;
3093 }
3094 if (insn & ARM_CP_RW_BIT) {
3095 /* vfp->arm */
3096 tmp = neon_load_reg(rn, pass);
3097 switch (size) {
3098 case 0:
3099 if (offset)
3100 tcg_gen_shri_i32(tmp, tmp, offset);
3101 if (insn & (1 << 23))
3102 gen_uxtb(tmp);
3103 else
3104 gen_sxtb(tmp);
3105 break;
3106 case 1:
3107 if (insn & (1 << 23)) {
3108 if (offset) {
3109 tcg_gen_shri_i32(tmp, tmp, 16);
3110 } else {
3111 gen_uxth(tmp);
3112 }
3113 } else {
3114 if (offset) {
3115 tcg_gen_sari_i32(tmp, tmp, 16);
3116 } else {
3117 gen_sxth(tmp);
3118 }
3119 }
3120 break;
3121 case 2:
3122 break;
3123 }
3124 store_reg(s, rd, tmp);
3125 } else {
3126 /* arm->vfp */
3127 tmp = load_reg(s, rd);
3128 if (insn & (1 << 23)) {
3129 /* VDUP */
3130 if (size == 0) {
3131 gen_neon_dup_u8(tmp, 0);
3132 } else if (size == 1) {
3133 gen_neon_dup_low16(tmp);
3134 }
3135 for (n = 0; n <= pass * 2; n++) {
3136 tmp2 = tcg_temp_new_i32();
3137 tcg_gen_mov_i32(tmp2, tmp);
3138 neon_store_reg(rn, n, tmp2);
3139 }
3140 neon_store_reg(rn, n, tmp);
3141 } else {
3142 /* VMOV */
3143 switch (size) {
3144 case 0:
3145 tmp2 = neon_load_reg(rn, pass);
3146 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
3147 tcg_temp_free_i32(tmp2);
3148 break;
3149 case 1:
3150 tmp2 = neon_load_reg(rn, pass);
3151 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
3152 tcg_temp_free_i32(tmp2);
3153 break;
3154 case 2:
3155 break;
3156 }
3157 neon_store_reg(rn, pass, tmp);
3158 }
3159 }
3160 } else { /* !dp */
3161 if ((insn & 0x6f) != 0x00)
3162 return 1;
3163 rn = VFP_SREG_N(insn);
3164 if (insn & ARM_CP_RW_BIT) {
3165 /* vfp->arm */
3166 if (insn & (1 << 21)) {
3167 /* system register */
3168 rn >>= 1;
3170 switch (rn) {
3171 case ARM_VFP_FPSID:
3172 /* VFP2 allows access to FSID from userspace.
3173 VFP3 restricts all id registers to privileged
3174 accesses. */
3175 if (IS_USER(s)
3176 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3177 return 1;
3178 }
3179 tmp = load_cpu_field(vfp.xregs[rn]);
3180 break;
3181 case ARM_VFP_FPEXC:
3182 if (IS_USER(s))
3183 return 1;
3184 tmp = load_cpu_field(vfp.xregs[rn]);
3185 break;
3186 case ARM_VFP_FPINST:
3187 case ARM_VFP_FPINST2:
3188 /* Not present in VFP3. */
3189 if (IS_USER(s)
3190 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3191 return 1;
3192 }
3193 tmp = load_cpu_field(vfp.xregs[rn]);
3194 break;
3195 case ARM_VFP_FPSCR:
3196 if (rd == 15) {
3197 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3198 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3199 } else {
3200 tmp = tcg_temp_new_i32();
3201 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3202 }
3203 break;
3204 case ARM_VFP_MVFR2:
3205 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
3206 return 1;
3207 }
3208 /* fall through */
3209 case ARM_VFP_MVFR0:
3210 case ARM_VFP_MVFR1:
3211 if (IS_USER(s)
3212 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
3213 return 1;
3214 }
3215 tmp = load_cpu_field(vfp.xregs[rn]);
3216 break;
3217 default:
3218 return 1;
3219 }
3220 } else {
3221 gen_mov_F0_vreg(0, rn);
3222 tmp = gen_vfp_mrs();
3223 }
3224 if (rd == 15) {
3225 /* Set the 4 flag bits in the CPSR. */
3226 gen_set_nzcv(tmp);
3227 tcg_temp_free_i32(tmp);
3228 } else {
3229 store_reg(s, rd, tmp);
3230 }
3231 } else {
3232 /* arm->vfp */
3233 if (insn & (1 << 21)) {
3234 rn >>= 1;
3235 /* system register */
3236 switch (rn) {
3237 case ARM_VFP_FPSID:
3238 case ARM_VFP_MVFR0:
3239 case ARM_VFP_MVFR1:
3240 /* Writes are ignored. */
3241 break;
3242 case ARM_VFP_FPSCR:
3243 tmp = load_reg(s, rd);
3244 gen_helper_vfp_set_fpscr(cpu_env, tmp);
3245 tcg_temp_free_i32(tmp);
3246 gen_lookup_tb(s);
3247 break;
3248 case ARM_VFP_FPEXC:
3249 if (IS_USER(s))
3250 return 1;
3251 /* TODO: VFP subarchitecture support.
3252 * For now, keep the EN bit only */
3253 tmp = load_reg(s, rd);
3254 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
3255 store_cpu_field(tmp, vfp.xregs[rn]);
3256 gen_lookup_tb(s);
3257 break;
3258 case ARM_VFP_FPINST:
3259 case ARM_VFP_FPINST2:
3260 if (IS_USER(s)) {
3261 return 1;
3262 }
3263 tmp = load_reg(s, rd);
3264 store_cpu_field(tmp, vfp.xregs[rn]);
3265 break;
3266 default:
3267 return 1;
3268 }
3269 } else {
3270 tmp = load_reg(s, rd);
3271 gen_vfp_msr(tmp);
3272 gen_mov_vreg_F0(0, rn);
3273 }
3274 }
3275 }
3276 } else {
3277 /* data processing */
3278 /* The opcode is in bits 23, 21, 20 and 6. */
3279 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3280 if (dp) {
3281 if (op == 15) {
3282 /* rn is opcode */
3283 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3284 } else {
3285 /* rn is register number */
3286 VFP_DREG_N(rn, insn);
3287 }
3289 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3290 ((rn & 0x1e) == 0x6))) {
3291 /* Integer or single/half precision destination. */
3292 rd = VFP_SREG_D(insn);
3293 } else {
3294 VFP_DREG_D(rd, insn);
3295 }
3296 if (op == 15 &&
3297 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3298 ((rn & 0x1e) == 0x4))) {
3299 /* VCVT from int or half precision is always from S reg
3300 * regardless of dp bit. VCVT with immediate frac_bits
3301 * has same format as SREG_M.
3302 */
3303 rm = VFP_SREG_M(insn);
3304 } else {
3305 VFP_DREG_M(rm, insn);
3306 }
3307 } else {
3308 rn = VFP_SREG_N(insn);
3309 if (op == 15 && rn == 15) {
3310 /* Double precision destination. */
3311 VFP_DREG_D(rd, insn);
3312 } else {
3313 rd = VFP_SREG_D(insn);
3314 }
3315 /* NB that we implicitly rely on the encoding for the frac_bits
3316 * in VCVT of fixed to float being the same as that of an SREG_M
3317 */
3318 rm = VFP_SREG_M(insn);
3319 }
3321 veclen = s->vec_len;
3322 if (op == 15 && rn > 3)
3323 veclen = 0;
3325 /* Shut up compiler warnings. */
3326 delta_m = 0;
3327 delta_d = 0;
3328 bank_mask = 0;
3330 if (veclen > 0) {
3331 if (dp)
3332 bank_mask = 0xc;
3333 else
3334 bank_mask = 0x18;
3336 /* Figure out what type of vector operation this is. */
3337 if ((rd & bank_mask) == 0) {
3338 /* scalar */
3339 veclen = 0;
3340 } else {
3341 if (dp)
3342 delta_d = (s->vec_stride >> 1) + 1;
3343 else
3344 delta_d = s->vec_stride + 1;
3346 if ((rm & bank_mask) == 0) {
3347 /* mixed scalar/vector */
3348 delta_m = 0;
3349 } else {
3350 /* vector */
3351 delta_m = delta_d;
3352 }
3353 }
3354 }
3356 /* Load the initial operands. */
3357 if (op == 15) {
3358 switch (rn) {
3359 case 16:
3360 case 17:
3361 /* Integer source */
3362 gen_mov_F0_vreg(0, rm);
3363 break;
3364 case 8:
3365 case 9:
3366 /* Compare */
3367 gen_mov_F0_vreg(dp, rd);
3368 gen_mov_F1_vreg(dp, rm);
3369 break;
3370 case 10:
3371 case 11:
3372 /* Compare with zero */
3373 gen_mov_F0_vreg(dp, rd);
3374 gen_vfp_F1_ld0(dp);
3375 break;
3376 case 20:
3377 case 21:
3378 case 22:
3379 case 23:
3380 case 28:
3381 case 29:
3382 case 30:
3383 case 31:
3384 /* Source and destination the same. */
3385 gen_mov_F0_vreg(dp, rd);
3386 break;
3387 case 4:
3388 case 5:
3389 case 6:
3390 case 7:
3391 /* VCVTB, VCVTT: only present with the halfprec extension
3392 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3393 * (we choose to UNDEF)
3394 */
3395 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3396 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3397 return 1;
3398 }
3399 if (!extract32(rn, 1, 1)) {
3400 /* Half precision source. */
3401 gen_mov_F0_vreg(0, rm);
3402 break;
3403 }
3404 /* Otherwise fall through */
3405 default:
3406 /* One source operand. */
3407 gen_mov_F0_vreg(dp, rm);
3408 break;
3409 }
3410 } else {
3411 /* Two source operands. */
3412 gen_mov_F0_vreg(dp, rn);
3413 gen_mov_F1_vreg(dp, rm);
3414 }
3416 for (;;) {
3417 /* Perform the calculation. */
3418 switch (op) {
3419 case 0: /* VMLA: fd + (fn * fm) */
3420 /* Note that order of inputs to the add matters for NaNs */
3421 gen_vfp_F1_mul(dp);
3422 gen_mov_F0_vreg(dp, rd);
3423 gen_vfp_add(dp);
3424 break;
3425 case 1: /* VMLS: fd + -(fn * fm) */
3426 gen_vfp_mul(dp);
3427 gen_vfp_F1_neg(dp);
3428 gen_mov_F0_vreg(dp, rd);
3429 gen_vfp_add(dp);
3430 break;
3431 case 2: /* VNMLS: -fd + (fn * fm) */
3432 /* Note that it isn't valid to replace (-A + B) with (B - A)
3433 * or similar plausible looking simplifications
3434 * because this will give wrong results for NaNs.
3435 */
3436 gen_vfp_F1_mul(dp);
3437 gen_mov_F0_vreg(dp, rd);
3438 gen_vfp_neg(dp);
3439 gen_vfp_add(dp);
3440 break;
3441 case 3: /* VNMLA: -fd + -(fn * fm) */
3442 gen_vfp_mul(dp);
3443 gen_vfp_F1_neg(dp);
3444 gen_mov_F0_vreg(dp, rd);
3445 gen_vfp_neg(dp);
3446 gen_vfp_add(dp);
3447 break;
3448 case 4: /* mul: fn * fm */
3449 gen_vfp_mul(dp);
3450 break;
3451 case 5: /* nmul: -(fn * fm) */
3452 gen_vfp_mul(dp);
3453 gen_vfp_neg(dp);
3454 break;
3455 case 6: /* add: fn + fm */
3456 gen_vfp_add(dp);
3457 break;
3458 case 7: /* sub: fn - fm */
3459 gen_vfp_sub(dp);
3460 break;
3461 case 8: /* div: fn / fm */
3462 gen_vfp_div(dp);
3463 break;
3464 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3465 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3466 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3467 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3468 /* These are fused multiply-add, and must be done as one
3469 * floating point operation with no rounding between the
3470 * multiplication and addition steps.
3471 * NB that doing the negations here as separate steps is
3472 * correct : an input NaN should come out with its sign bit
3473 * flipped if it is a negated-input.
3474 */
3475 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
3476 return 1;
3477 }
3478 if (dp) {
3479 TCGv_ptr fpst;
3480 TCGv_i64 frd;
3481 if (op & 1) {
3482 /* VFNMS, VFMS */
3483 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3484 }
3485 frd = tcg_temp_new_i64();
3486 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3487 if (op & 2) {
3488 /* VFNMA, VFNMS */
3489 gen_helper_vfp_negd(frd, frd);
3490 }
3491 fpst = get_fpstatus_ptr(0);
3492 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3493 cpu_F1d, frd, fpst);
3494 tcg_temp_free_ptr(fpst);
3495 tcg_temp_free_i64(frd);
3496 } else {
3497 TCGv_ptr fpst;
3498 TCGv_i32 frd;
3499 if (op & 1) {
3500 /* VFNMS, VFMS */
3501 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3502 }
3503 frd = tcg_temp_new_i32();
3504 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3505 if (op & 2) {
3506 gen_helper_vfp_negs(frd, frd);
3507 }
3508 fpst = get_fpstatus_ptr(0);
3509 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3510 cpu_F1s, frd, fpst);
3511 tcg_temp_free_ptr(fpst);
3512 tcg_temp_free_i32(frd);
3513 }
3514 break;
3515 case 14: /* fconst */
3516 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3517 return 1;
3518 }
3520 n = (insn << 12) & 0x80000000;
3521 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3522 if (dp) {
3523 if (i & 0x40)
3524 i |= 0x3f80;
3525 else
3526 i |= 0x4000;
3527 n |= i << 16;
3528 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3529 } else {
3530 if (i & 0x40)
3531 i |= 0x780;
3532 else
3533 i |= 0x800;
3534 n |= i << 19;
3535 tcg_gen_movi_i32(cpu_F0s, n);
3536 }
3537 break;
3538 case 15: /* extension space */
3539 switch (rn) {
3540 case 0: /* cpy */
3541 /* no-op */
3542 break;
3543 case 1: /* abs */
3544 gen_vfp_abs(dp);
3545 break;
3546 case 2: /* neg */
3547 gen_vfp_neg(dp);
3548 break;
3549 case 3: /* sqrt */
3550 gen_vfp_sqrt(dp);
3551 break;
3552 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
3553 tmp = gen_vfp_mrs();
3554 tcg_gen_ext16u_i32(tmp, tmp);
3555 if (dp) {
3556 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3557 cpu_env);
3558 } else {
3559 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3560 cpu_env);
3561 }
3562 tcg_temp_free_i32(tmp);
3563 break;
3564 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
3565 tmp = gen_vfp_mrs();
3566 tcg_gen_shri_i32(tmp, tmp, 16);
3567 if (dp) {
3568 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3569 cpu_env);
3570 } else {
3571 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3572 cpu_env);
3573 }
3574 tcg_temp_free_i32(tmp);
3575 break;
3576 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3577 tmp = tcg_temp_new_i32();
3578 if (dp) {
3579 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3580 cpu_env);
3581 } else {
3582 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3583 cpu_env);
3584 }
3585 gen_mov_F0_vreg(0, rd);
3586 tmp2 = gen_vfp_mrs();
3587 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3588 tcg_gen_or_i32(tmp, tmp, tmp2);
3589 tcg_temp_free_i32(tmp2);
3590 gen_vfp_msr(tmp);
3591 break;
3592 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3593 tmp = tcg_temp_new_i32();
3594 if (dp) {
3595 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3596 cpu_env);
3597 } else {
3598 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3599 cpu_env);
3600 }
3601 tcg_gen_shli_i32(tmp, tmp, 16);
3602 gen_mov_F0_vreg(0, rd);
3603 tmp2 = gen_vfp_mrs();
3604 tcg_gen_ext16u_i32(tmp2, tmp2);
3605 tcg_gen_or_i32(tmp, tmp, tmp2);
3606 tcg_temp_free_i32(tmp2);
3607 gen_vfp_msr(tmp);
3608 break;
3609 case 8: /* cmp */
3610 gen_vfp_cmp(dp);
3611 break;
3612 case 9: /* cmpe */
3613 gen_vfp_cmpe(dp);
3614 break;
3615 case 10: /* cmpz */
3616 gen_vfp_cmp(dp);
3617 break;
3618 case 11: /* cmpez */
3619 gen_vfp_F1_ld0(dp);
3620 gen_vfp_cmpe(dp);
3621 break;
3622 case 12: /* vrintr */
3623 {
3624 TCGv_ptr fpst = get_fpstatus_ptr(0);
3625 if (dp) {
3626 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3627 } else {
3628 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3629 }
3630 tcg_temp_free_ptr(fpst);
3631 break;
3632 }
3633 case 13: /* vrintz */
3634 {
3635 TCGv_ptr fpst = get_fpstatus_ptr(0);
3636 TCGv_i32 tcg_rmode;
3637 tcg_rmode = tcg_const_i32(float_round_to_zero);
3638 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3639 if (dp) {
3640 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3641 } else {
3642 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3643 }
3644 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3645 tcg_temp_free_i32(tcg_rmode);
3646 tcg_temp_free_ptr(fpst);
3647 break;
3648 }
3649 case 14: /* vrintx */
3650 {
3651 TCGv_ptr fpst = get_fpstatus_ptr(0);
3652 if (dp) {
3653 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3654 } else {
3655 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3656 }
3657 tcg_temp_free_ptr(fpst);
3658 break;
3659 }
3660 case 15: /* single<->double conversion */
3661 if (dp)
3662 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3663 else
3664 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3665 break;
3666 case 16: /* fuito */
3667 gen_vfp_uito(dp, 0);
3668 break;
3669 case 17: /* fsito */
3670 gen_vfp_sito(dp, 0);
3671 break;
3672 case 20: /* fshto */
3673 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3674 return 1;
3675 }
3676 gen_vfp_shto(dp, 16 - rm, 0);
3677 break;
3678 case 21: /* fslto */
3679 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3680 return 1;
3681 }
3682 gen_vfp_slto(dp, 32 - rm, 0);
3683 break;
3684 case 22: /* fuhto */
3685 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3686 return 1;
3687 }
3688 gen_vfp_uhto(dp, 16 - rm, 0);
3689 break;
3690 case 23: /* fulto */
3691 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3692 return 1;
3693 }
3694 gen_vfp_ulto(dp, 32 - rm, 0);
3695 break;
3696 case 24: /* ftoui */
3697 gen_vfp_toui(dp, 0);
3698 break;
3699 case 25: /* ftouiz */
3700 gen_vfp_touiz(dp, 0);
3701 break;
3702 case 26: /* ftosi */
3703 gen_vfp_tosi(dp, 0);
3704 break;
3705 case 27: /* ftosiz */
3706 gen_vfp_tosiz(dp, 0);
3707 break;
3708 case 28: /* ftosh */
3709 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3710 return 1;
3711 }
3712 gen_vfp_tosh(dp, 16 - rm, 0);
3713 break;
3714 case 29: /* ftosl */
3715 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3716 return 1;
3717 }
3718 gen_vfp_tosl(dp, 32 - rm, 0);
3719 break;
3720 case 30: /* ftouh */
3721 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3722 return 1;
3723 }
3724 gen_vfp_touh(dp, 16 - rm, 0);
3725 break;
3726 case 31: /* ftoul */
3727 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3728 return 1;
3729 }
3730 gen_vfp_toul(dp, 32 - rm, 0);
3731 break;
3732 default: /* undefined */
3733 return 1;
3734 }
3735 break;
3736 default: /* undefined */
3737 return 1;
3738 }
3740 /* Write back the result. */
3741 if (op == 15 && (rn >= 8 && rn <= 11)) {
3742 /* Comparison, do nothing. */
3743 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3744 (rn & 0x1e) == 0x6)) {
3745 /* VCVT double to int: always integer result.
3746 * VCVT double to half precision is always a single
3747 * precision result.
3748 */
3749 gen_mov_vreg_F0(0, rd);
3750 } else if (op == 15 && rn == 15) {
3751 /* conversion */
3752 gen_mov_vreg_F0(!dp, rd);
3753 } else {
3754 gen_mov_vreg_F0(dp, rd);
3755 }
3757 /* break out of the loop if we have finished */
3758 if (veclen == 0)
3759 break;
3761 if (op == 15 && delta_m == 0) {
3762 /* single source one-many */
3763 while (veclen--) {
3764 rd = ((rd + delta_d) & (bank_mask - 1))
3765 | (rd & bank_mask);
3766 gen_mov_vreg_F0(dp, rd);
3767 }
3768 break;
3769 }
3770 /* Set up the next operands. */
3771 veclen--;
3772 rd = ((rd + delta_d) & (bank_mask - 1))
3773 | (rd & bank_mask);
3775 if (op == 15) {
3776 /* One source operand. */
3777 rm = ((rm + delta_m) & (bank_mask - 1))
3778 | (rm & bank_mask);
3779 gen_mov_F0_vreg(dp, rm);
3780 } else {
3781 /* Two source operands. */
3782 rn = ((rn + delta_d) & (bank_mask - 1))
3783 | (rn & bank_mask);
3784 gen_mov_F0_vreg(dp, rn);
3785 if (delta_m) {
3786 rm = ((rm + delta_m) & (bank_mask - 1))
3787 | (rm & bank_mask);
3788 gen_mov_F1_vreg(dp, rm);
3789 }
3790 }
3791 }
3792 }
3793 break;
3794 case 0xc:
3795 case 0xd:
3796 if ((insn & 0x03e00000) == 0x00400000) {
3797 /* two-register transfer */
3798 rn = (insn >> 16) & 0xf;
3799 rd = (insn >> 12) & 0xf;
3800 if (dp) {
3801 VFP_DREG_M(rm, insn);
3802 } else {
3803 rm = VFP_SREG_M(insn);
3804 }
3806 if (insn & ARM_CP_RW_BIT) {
3807 /* vfp->arm */
3808 if (dp) {
3809 gen_mov_F0_vreg(0, rm * 2);
3810 tmp = gen_vfp_mrs();
3811 store_reg(s, rd, tmp);
3812 gen_mov_F0_vreg(0, rm * 2 + 1);
3813 tmp = gen_vfp_mrs();
3814 store_reg(s, rn, tmp);
3815 } else {
3816 gen_mov_F0_vreg(0, rm);
3817 tmp = gen_vfp_mrs();
3818 store_reg(s, rd, tmp);
3819 gen_mov_F0_vreg(0, rm + 1);
3820 tmp = gen_vfp_mrs();
3821 store_reg(s, rn, tmp);
3822 }
3823 } else {
3824 /* arm->vfp */
3825 if (dp) {
3826 tmp = load_reg(s, rd);
3827 gen_vfp_msr(tmp);
3828 gen_mov_vreg_F0(0, rm * 2);
3829 tmp = load_reg(s, rn);
3830 gen_vfp_msr(tmp);
3831 gen_mov_vreg_F0(0, rm * 2 + 1);
3832 } else {
3833 tmp = load_reg(s, rd);
3834 gen_vfp_msr(tmp);
3835 gen_mov_vreg_F0(0, rm);
3836 tmp = load_reg(s, rn);
3837 gen_vfp_msr(tmp);
3838 gen_mov_vreg_F0(0, rm + 1);
3839 }
3840 }
3841 } else {
3842 /* Load/store */
3843 rn = (insn >> 16) & 0xf;
3844 if (dp)
3845 VFP_DREG_D(rd, insn);
3846 else
3847 rd = VFP_SREG_D(insn);
3848 if ((insn & 0x01200000) == 0x01000000) {
3849 /* Single load/store */
3850 offset = (insn & 0xff) << 2;
3851 if ((insn & (1 << 23)) == 0)
3852 offset = -offset;
3853 if (s->thumb && rn == 15) {
3854 /* This is actually UNPREDICTABLE */
3855 addr = tcg_temp_new_i32();
3856 tcg_gen_movi_i32(addr, s->pc & ~2);
3857 } else {
3858 addr = load_reg(s, rn);
3859 }
3860 tcg_gen_addi_i32(addr, addr, offset);
3861 if (insn & (1 << 20)) {
3862 gen_vfp_ld(s, dp, addr);
3863 gen_mov_vreg_F0(dp, rd);
3864 } else {
3865 gen_mov_F0_vreg(dp, rd);
3866 gen_vfp_st(s, dp, addr);
3867 }
3868 tcg_temp_free_i32(addr);
3869 } else {
3870 /* load/store multiple */
3871 int w = insn & (1 << 21);
3872 if (dp)
3873 n = (insn >> 1) & 0x7f;
3874 else
3875 n = insn & 0xff;
3877 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3878 /* P == U , W == 1 => UNDEF */
3879 return 1;
3880 }
3881 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3882 /* UNPREDICTABLE cases for bad immediates: we choose to
3883 * UNDEF to avoid generating huge numbers of TCG ops
3884 */
3885 return 1;
3886 }
3887 if (rn == 15 && w) {
3888 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3889 return 1;
3890 }
3892 if (s->thumb && rn == 15) {
3893 /* This is actually UNPREDICTABLE */
3894 addr = tcg_temp_new_i32();
3895 tcg_gen_movi_i32(addr, s->pc & ~2);
3896 } else {
3897 addr = load_reg(s, rn);
3898 }
3899 if (insn & (1 << 24)) /* pre-decrement */
3900 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3902 if (dp)
3903 offset = 8;
3904 else
3905 offset = 4;
3906 for (i = 0; i < n; i++) {
3907 if (insn & ARM_CP_RW_BIT) {
3908 /* load */
3909 gen_vfp_ld(s, dp, addr);
3910 gen_mov_vreg_F0(dp, rd + i);
3911 } else {
3912 /* store */
3913 gen_mov_F0_vreg(dp, rd + i);
3914 gen_vfp_st(s, dp, addr);
3915 }
3916 tcg_gen_addi_i32(addr, addr, offset);
3917 }
3918 if (w) {
3919 /* writeback */
3920 if (insn & (1 << 24))
3921 offset = -offset * n;
3922 else if (dp && (insn & 1))
3923 offset = 4;
3924 else
3925 offset = 0;
3927 if (offset != 0)
3928 tcg_gen_addi_i32(addr, addr, offset);
3929 store_reg(s, rn, addr);
3930 } else {
3931 tcg_temp_free_i32(addr);
3932 }
3933 }
3934 }
3935 break;
3936 default:
3937 /* Should never happen. */
3938 return 1;
3939 }
3940 return 0;
3941 }
3943 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
3944 {
3945 TranslationBlock *tb;
3947 tb = s->tb;
3948 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3949 tcg_gen_goto_tb(n);
3950 gen_set_pc_im(s, dest);
3951 tcg_gen_exit_tb((uintptr_t)tb + n);
3952 } else {
3953 gen_set_pc_im(s, dest);
3954 tcg_gen_exit_tb(0);
3955 }
3956 }
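/* Direct block chaining with tcg_gen_goto_tb is restricted to
 * destinations on the same guest page as the TB being translated;
 * anything else ends the TB with a plain exit_tb(0) so the new PC is
 * looked up afresh.
 */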
3958 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3959 {
3960 if (unlikely(s->singlestep_enabled || s->ss_active)) {
3961 /* An indirect jump so that we still trigger the debug exception. */
3962 if (s->thumb)
3963 dest |= 1;
3964 gen_bx_im(s, dest);
3965 } else {
3966 gen_goto_tb(s, 0, dest);
3967 s->is_jmp = DISAS_TB_JUMP;
3968 }
3969 }
3971 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
3972 {
3973 if (x)
3974 tcg_gen_sari_i32(t0, t0, 16);
3975 else
3976 gen_sxth(t0);
3977 if (y)
3978 tcg_gen_sari_i32(t1, t1, 16);
3979 else
3980 gen_sxth(t1);
3981 tcg_gen_mul_i32(t0, t0, t1);
3982 }
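/* 16 x 16 -> 32 signed multiply used by the SMULxy/SMLAxy-style
 * encodings: x and y pick the top (1) or bottom (0) halfword of each
 * operand, e.g. x = 1, y = 0 multiplies t0[31:16] by t1[15:0].
 */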
3984 /* Return the mask of PSR bits set by a MSR instruction. */
3985 static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
3986 {
3987 uint32_t mask;
3989 mask = 0;
3990 if (flags & (1 << 0))
3991 mask |= 0xff;
3992 if (flags & (1 << 1))
3993 mask |= 0xff00;
3994 if (flags & (1 << 2))
3995 mask |= 0xff0000;
3996 if (flags & (1 << 3))
3997 mask |= 0xff000000;
3999 /* Mask out undefined bits. */
4000 mask &= ~CPSR_RESERVED;
4001 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
4002 mask &= ~CPSR_T;
4003 }
4004 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
4005 mask &= ~CPSR_Q; /* V5TE in reality */
4006 }
4007 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
4008 mask &= ~(CPSR_E | CPSR_GE);
4009 }
4010 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
4011 mask &= ~CPSR_IT;
4012 }
4013 /* Mask out execution state and reserved bits. */
4014 if (!spsr) {
4015 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4016 }
4017 /* Mask out privileged bits. */
4018 if (IS_USER(s))
4019 mask &= CPSR_USER;
4020 return mask;
4021 }
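/* Example: an MSR with field mask 0b1001 requests the control and
 * flags bytes, so the tests above build 0xff0000ff before the
 * feature-dependent bits are masked out.
 */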
4023 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
4024 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
4025 {
4026 TCGv_i32 tmp;
4027 if (spsr) {
4028 /* ??? This is also undefined in system mode. */
4029 if (IS_USER(s))
4030 return 1;
4031 }
4032 tmp = load_cpu_field(spsr);
4033 tcg_gen_andi_i32(tmp, tmp, ~mask);
4034 tcg_gen_andi_i32(t0, t0, mask);
4035 tcg_gen_or_i32(tmp, tmp, t0);
4036 store_cpu_field(tmp, spsr);
4037 } else {
4038 gen_set_cpsr(t0, mask);
4040 tcg_temp_free_i32(t0);
4041 gen_lookup_tb(s);
4042 return 0;
4043 }
4045 /* Returns nonzero if access to the PSR is not permitted. */
4046 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4047 {
4048 TCGv_i32 tmp;
4049 tmp = tcg_temp_new_i32();
4050 tcg_gen_movi_i32(tmp, val);
4051 return gen_set_psr(s, mask, spsr, tmp);
4052 }
4054 /* Generate an old-style exception return. Marks pc as dead. */
4055 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4056 {
4057 TCGv_i32 tmp;
4058 store_reg(s, 15, pc);
4059 tmp = load_cpu_field(spsr);
4060 gen_set_cpsr(tmp, CPSR_ERET_MASK);
4061 tcg_temp_free_i32(tmp);
4062 s->is_jmp = DISAS_UPDATE;
4063 }
4065 /* Generate a v6 exception return. Marks both values as dead. */
4066 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
4067 {
4068 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
4069 tcg_temp_free_i32(cpsr);
4070 store_reg(s, 15, pc);
4071 s->is_jmp = DISAS_UPDATE;
4072 }
4074 static void gen_nop_hint(DisasContext *s, int val)
4075 {
4076 switch (val) {
4077 case 3: /* wfi */
4078 gen_set_pc_im(s, s->pc);
4079 s->is_jmp = DISAS_WFI;
4080 break;
4081 case 2: /* wfe */
4082 gen_set_pc_im(s, s->pc);
4083 s->is_jmp = DISAS_WFE;
4084 break;
4085 case 4: /* sev */
4086 case 5: /* sevl */
4087 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
4088 default: /* nop */
4089 break;
4090 }
4091 }
4093 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
4095 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
4096 {
4097 switch (size) {
4098 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4099 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4100 case 2: tcg_gen_add_i32(t0, t0, t1); break;
4101 default: abort();
4102 }
4103 }
4105 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
4106 {
4107 switch (size) {
4108 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4109 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4110 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
4111 default: return;
4112 }
4113 }
4115 /* 32-bit pairwise ops end up the same as the elementwise versions. */
4116 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4117 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4118 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4119 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4121 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
4122 switch ((size << 1) | u) { \
4123 case 0: \
4124 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
4125 break; \
4126 case 1: \
4127 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
4128 break; \
4129 case 2: \
4130 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
4131 break; \
4132 case 3: \
4133 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
4134 break; \
4135 case 4: \
4136 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
4137 break; \
4138 case 5: \
4139 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
4140 break; \
4141 default: return 1; \
4142 }} while (0)
4144 #define GEN_NEON_INTEGER_OP(name) do { \
4145 switch ((size << 1) | u) { \
4146 case 0: \
4147 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
4148 break; \
4149 case 1: \
4150 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
4151 break; \
4152 case 2: \
4153 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
4154 break; \
4155 case 3: \
4156 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
4157 break; \
4158 case 4: \
4159 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
4160 break; \
4161 case 5: \
4162 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
4163 break; \
4164 default: return 1; \
4165 }} while (0)
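/* Both macros dispatch on ((size << 1) | u): size 0/1/2 selects 8-,
 * 16- or 32-bit elements and u selects the unsigned variant, so e.g.
 * size == 1 with u == 1 lands on case 3 and calls the _u16 helper.
 */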
4167 static TCGv_i32 neon_load_scratch(int scratch)
4168 {
4169 TCGv_i32 tmp = tcg_temp_new_i32();
4170 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4171 return tmp;
4172 }
4174 static void neon_store_scratch(int scratch, TCGv_i32 var)
4175 {
4176 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4177 tcg_temp_free_i32(var);
4178 }
4180 static inline TCGv_i32 neon_get_scalar(int size, int reg)
4181 {
4182 TCGv_i32 tmp;
4183 if (size == 1) {
4184 tmp = neon_load_reg(reg & 7, reg >> 4);
4185 if (reg & 8) {
4186 gen_neon_dup_high16(tmp);
4187 } else {
4188 gen_neon_dup_low16(tmp);
4189 }
4190 } else {
4191 tmp = neon_load_reg(reg & 15, reg >> 4);
4192 }
4193 return tmp;
4194 }
4196 static int gen_neon_unzip(int rd, int rm, int size, int q)
4197 {
4198 TCGv_i32 tmp, tmp2;
4199 if (!q && size == 2) {
4200 return 1;
4201 }
4202 tmp = tcg_const_i32(rd);
4203 tmp2 = tcg_const_i32(rm);
4204 if (q) {
4205 switch (size) {
4206 case 0:
4207 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
4208 break;
4209 case 1:
4210 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
4211 break;
4212 case 2:
4213 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
4214 break;
4215 default:
4216 abort();
4217 }
4218 } else {
4219 switch (size) {
4220 case 0:
4221 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
4222 break;
4223 case 1:
4224 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
4225 break;
4226 default:
4227 abort();
4228 }
4229 }
4230 tcg_temp_free_i32(tmp);
4231 tcg_temp_free_i32(tmp2);
4232 return 0;
4233 }
4235 static int gen_neon_zip(int rd, int rm, int size, int q)
4236 {
4237 TCGv_i32 tmp, tmp2;
4238 if (!q && size == 2) {
4239 return 1;
4240 }
4241 tmp = tcg_const_i32(rd);
4242 tmp2 = tcg_const_i32(rm);
4243 if (q) {
4244 switch (size) {
4245 case 0:
4246 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
4247 break;
4248 case 1:
4249 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
4250 break;
4251 case 2:
4252 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
4253 break;
4254 default:
4255 abort();
4256 }
4257 } else {
4258 switch (size) {
4259 case 0:
4260 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
4261 break;
4262 case 1:
4263 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
4264 break;
4265 default:
4266 abort();
4267 }
4268 }
4269 tcg_temp_free_i32(tmp);
4270 tcg_temp_free_i32(tmp2);
4271 return 0;
4272 }
4274 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
4275 {
4276 TCGv_i32 rd, tmp;
4278 rd = tcg_temp_new_i32();
4279 tmp = tcg_temp_new_i32();
4281 tcg_gen_shli_i32(rd, t0, 8);
4282 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4283 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4284 tcg_gen_or_i32(rd, rd, tmp);
4286 tcg_gen_shri_i32(t1, t1, 8);
4287 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4288 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4289 tcg_gen_or_i32(t1, t1, tmp);
4290 tcg_gen_mov_i32(t0, rd);
4292 tcg_temp_free_i32(tmp);
4293 tcg_temp_free_i32(rd);
4294 }
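/* Illustrative trace: with byte lanes t0 = {a0,a1,a2,a3} and
 * t1 = {b0,b1,b2,b3} (lane 0 = least significant), the masking and
 * shifting above leaves t0 = {b0,a0,b2,a2} and t1 = {b1,a1,b3,a3},
 * pairing corresponding lanes of the two inputs as VTRN requires.
 */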
4296 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
4297 {
4298 TCGv_i32 rd, tmp;
4300 rd = tcg_temp_new_i32();
4301 tmp = tcg_temp_new_i32();
4303 tcg_gen_shli_i32(rd, t0, 16);
4304 tcg_gen_andi_i32(tmp, t1, 0xffff);
4305 tcg_gen_or_i32(rd, rd, tmp);
4306 tcg_gen_shri_i32(t1, t1, 16);
4307 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4308 tcg_gen_or_i32(t1, t1, tmp);
4309 tcg_gen_mov_i32(t0, rd);
4311 tcg_temp_free_i32(tmp);
4312 tcg_temp_free_i32(rd);
4313 }
4316 static struct {
4317 int nregs;
4318 int interleave;
4319 int spacing;
4320 } neon_ls_element_type[11] = {
4321 {4, 4, 1},
4322 {4, 4, 2},
4323 {4, 1, 1},
4324 {4, 2, 1},
4325 {3, 3, 1},
4326 {3, 3, 2},
4327 {3, 1, 1},
4328 {1, 1, 1},
4329 {2, 2, 1},
4330 {2, 2, 2},
4331 {2, 1, 1}
4332 };
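/* Reading the table: entry [op] gives, for the "load/store all
 * elements" forms below, how many D registers are accessed, the
 * interleave factor and the register spacing; e.g. op 0 (VLD4/VST4)
 * is four registers fully interleaved, op 7 (one-register VLD1/VST1)
 * is {1, 1, 1}.
 */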
4334 /* Translate a NEON load/store element instruction. Return nonzero if the
4335 instruction is invalid. */
4336 static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
4337 {
4338 int rd, rn, rm;
4339 int op;
4340 int nregs;
4341 int interleave;
4342 int spacing;
4343 int stride;
4344 int size;
4345 int reg;
4346 int pass;
4347 int load;
4348 int shift;
4349 int n;
4350 TCGv_i32 addr;
4351 TCGv_i32 tmp;
4352 TCGv_i32 tmp2;
4353 TCGv_i64 tmp64;
4355 /* FIXME: this access check should not take precedence over UNDEF
4356 * for invalid encodings; we will generate incorrect syndrome information
4357 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4358 */
4359 if (!s->cpacr_fpen) {
4360 gen_exception_insn(s, 4, EXCP_UDEF,
4361 syn_fp_access_trap(1, 0xe, s->thumb));
4362 return 0;
4363 }
4365 if (!s->vfp_enabled)
4366 return 1;
4367 VFP_DREG_D(rd, insn);
4368 rn = (insn >> 16) & 0xf;
4369 rm = insn & 0xf;
4370 load = (insn & (1 << 21)) != 0;
4371 if ((insn & (1 << 23)) == 0) {
4372 /* Load store all elements. */
4373 op = (insn >> 8) & 0xf;
4374 size = (insn >> 6) & 3;
4375 if (op > 10)
4376 return 1;
4377 /* Catch UNDEF cases for bad values of align field */
4378 switch (op & 0xc) {
4379 case 4:
4380 if (((insn >> 5) & 1) == 1) {
4381 return 1;
4383 break;
4384 case 8:
4385 if (((insn >> 4) & 3) == 3) {
4386 return 1;
4388 break;
4389 default:
4390 break;
4392 nregs = neon_ls_element_type[op].nregs;
4393 interleave = neon_ls_element_type[op].interleave;
4394 spacing = neon_ls_element_type[op].spacing;
4395 if (size == 3 && (interleave | spacing) != 1)
4396 return 1;
4397 addr = tcg_temp_new_i32();
4398 load_reg_var(s, addr, rn);
4399 stride = (1 << size) * interleave;
4400 for (reg = 0; reg < nregs; reg++) {
4401 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
4402 load_reg_var(s, addr, rn);
4403 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
4404 } else if (interleave == 2 && nregs == 4 && reg == 2) {
4405 load_reg_var(s, addr, rn);
4406 tcg_gen_addi_i32(addr, addr, 1 << size);
4408 if (size == 3) {
4409 tmp64 = tcg_temp_new_i64();
4410 if (load) {
4411 gen_aa32_ld64(tmp64, addr, get_mem_index(s));
4412 neon_store_reg64(tmp64, rd);
4413 } else {
4414 neon_load_reg64(tmp64, rd);
4415 gen_aa32_st64(tmp64, addr, get_mem_index(s));
4417 tcg_temp_free_i64(tmp64);
4418 tcg_gen_addi_i32(addr, addr, stride);
4419 } else {
4420 for (pass = 0; pass < 2; pass++) {
4421 if (size == 2) {
4422 if (load) {
4423 tmp = tcg_temp_new_i32();
4424 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4425 neon_store_reg(rd, pass, tmp);
4426 } else {
4427 tmp = neon_load_reg(rd, pass);
4428 gen_aa32_st32(tmp, addr, get_mem_index(s));
4429 tcg_temp_free_i32(tmp);
4431 tcg_gen_addi_i32(addr, addr, stride);
4432 } else if (size == 1) {
4433 if (load) {
4434 tmp = tcg_temp_new_i32();
4435 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4436 tcg_gen_addi_i32(addr, addr, stride);
4437 tmp2 = tcg_temp_new_i32();
4438 gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
4439 tcg_gen_addi_i32(addr, addr, stride);
4440 tcg_gen_shli_i32(tmp2, tmp2, 16);
4441 tcg_gen_or_i32(tmp, tmp, tmp2);
4442 tcg_temp_free_i32(tmp2);
4443 neon_store_reg(rd, pass, tmp);
4444 } else {
4445 tmp = neon_load_reg(rd, pass);
4446 tmp2 = tcg_temp_new_i32();
4447 tcg_gen_shri_i32(tmp2, tmp, 16);
4448 gen_aa32_st16(tmp, addr, get_mem_index(s));
4449 tcg_temp_free_i32(tmp);
4450 tcg_gen_addi_i32(addr, addr, stride);
4451 gen_aa32_st16(tmp2, addr, get_mem_index(s));
4452 tcg_temp_free_i32(tmp2);
4453 tcg_gen_addi_i32(addr, addr, stride);
4455 } else /* size == 0 */ {
4456 if (load) {
4457 TCGV_UNUSED_I32(tmp2);
4458 for (n = 0; n < 4; n++) {
4459 tmp = tcg_temp_new_i32();
4460 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4461 tcg_gen_addi_i32(addr, addr, stride);
4462 if (n == 0) {
4463 tmp2 = tmp;
4464 } else {
4465 tcg_gen_shli_i32(tmp, tmp, n * 8);
4466 tcg_gen_or_i32(tmp2, tmp2, tmp);
4467 tcg_temp_free_i32(tmp);
4470 neon_store_reg(rd, pass, tmp2);
4471 } else {
4472 tmp2 = neon_load_reg(rd, pass);
4473 for (n = 0; n < 4; n++) {
4474 tmp = tcg_temp_new_i32();
4475 if (n == 0) {
4476 tcg_gen_mov_i32(tmp, tmp2);
4477 } else {
4478 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4480 gen_aa32_st8(tmp, addr, get_mem_index(s));
4481 tcg_temp_free_i32(tmp);
4482 tcg_gen_addi_i32(addr, addr, stride);
4484 tcg_temp_free_i32(tmp2);
4489 rd += spacing;
4491 tcg_temp_free_i32(addr);
4492 stride = nregs * 8;
4493 } else {
4494 size = (insn >> 10) & 3;
4495 if (size == 3) {
4496 /* Load single element to all lanes. */
4497 int a = (insn >> 4) & 1;
4498 if (!load) {
4499 return 1;
4501 size = (insn >> 6) & 3;
4502 nregs = ((insn >> 8) & 3) + 1;
4504 if (size == 3) {
4505 if (nregs != 4 || a == 0) {
4506 return 1;
4508 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4509 size = 2;
4511 if (nregs == 1 && a == 1 && size == 0) {
4512 return 1;
4514 if (nregs == 3 && a == 1) {
4515 return 1;
4517 addr = tcg_temp_new_i32();
4518 load_reg_var(s, addr, rn);
4519 if (nregs == 1) {
4520 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4521 tmp = gen_load_and_replicate(s, addr, size);
4522 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4523 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4524 if (insn & (1 << 5)) {
4525 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4526 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4528 tcg_temp_free_i32(tmp);
4529 } else {
4530 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4531 stride = (insn & (1 << 5)) ? 2 : 1;
4532 for (reg = 0; reg < nregs; reg++) {
4533 tmp = gen_load_and_replicate(s, addr, size);
4534 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4535 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4536 tcg_temp_free_i32(tmp);
4537 tcg_gen_addi_i32(addr, addr, 1 << size);
4538 rd += stride;
4541 tcg_temp_free_i32(addr);
4542 stride = (1 << size) * nregs;
4543 } else {
4544 /* Single element. */
4545 int idx = (insn >> 4) & 0xf;
4546 pass = (insn >> 7) & 1;
4547 switch (size) {
4548 case 0:
4549 shift = ((insn >> 5) & 3) * 8;
4550 stride = 1;
4551 break;
4552 case 1:
4553 shift = ((insn >> 6) & 1) * 16;
4554 stride = (insn & (1 << 5)) ? 2 : 1;
4555 break;
4556 case 2:
4557 shift = 0;
4558 stride = (insn & (1 << 6)) ? 2 : 1;
4559 break;
4560 default:
4561 abort();
4563 nregs = ((insn >> 8) & 3) + 1;
4564 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4565 switch (nregs) {
4566 case 1:
4567 if (((idx & (1 << size)) != 0) ||
4568 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4569 return 1;
4571 break;
4572 case 3:
4573 if ((idx & 1) != 0) {
4574 return 1;
4576 /* fall through */
4577 case 2:
4578 if (size == 2 && (idx & 2) != 0) {
4579 return 1;
4581 break;
4582 case 4:
4583 if ((size == 2) && ((idx & 3) == 3)) {
4584 return 1;
4586 break;
4587 default:
4588 abort();
4590 if ((rd + stride * (nregs - 1)) > 31) {
4591 /* Attempts to write off the end of the register file
4592 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4593 * the neon_load_reg() would write off the end of the array.
4594 */
4595 return 1;
4597 addr = tcg_temp_new_i32();
4598 load_reg_var(s, addr, rn);
4599 for (reg = 0; reg < nregs; reg++) {
4600 if (load) {
4601 tmp = tcg_temp_new_i32();
4602 switch (size) {
4603 case 0:
4604 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
4605 break;
4606 case 1:
4607 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
4608 break;
4609 case 2:
4610 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
4611 break;
4612 default: /* Avoid compiler warnings. */
4613 abort();
4615 if (size != 2) {
4616 tmp2 = neon_load_reg(rd, pass);
4617 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4618 shift, size ? 16 : 8);
4619 tcg_temp_free_i32(tmp2);
4621 neon_store_reg(rd, pass, tmp);
4622 } else { /* Store */
4623 tmp = neon_load_reg(rd, pass);
4624 if (shift)
4625 tcg_gen_shri_i32(tmp, tmp, shift);
4626 switch (size) {
4627 case 0:
4628 gen_aa32_st8(tmp, addr, get_mem_index(s));
4629 break;
4630 case 1:
4631 gen_aa32_st16(tmp, addr, get_mem_index(s));
4632 break;
4633 case 2:
4634 gen_aa32_st32(tmp, addr, get_mem_index(s));
4635 break;
4637 tcg_temp_free_i32(tmp);
4639 rd += stride;
4640 tcg_gen_addi_i32(addr, addr, 1 << size);
4642 tcg_temp_free_i32(addr);
4643 stride = nregs * (1 << size);
4644 }
4645 }
4646 if (rm != 15) {
4647 TCGv_i32 base;
4649 base = load_reg(s, rn);
4650 if (rm == 13) {
4651 tcg_gen_addi_i32(base, base, stride);
4652 } else {
4653 TCGv_i32 index;
4654 index = load_reg(s, rm);
4655 tcg_gen_add_i32(base, base, index);
4656 tcg_temp_free_i32(index);
4658 store_reg(s, rn, base);
4660 return 0;
4661 }
4663 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4664 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4665 {
4666 tcg_gen_and_i32(t, t, c);
4667 tcg_gen_andc_i32(f, f, c);
4668 tcg_gen_or_i32(dest, t, f);
4669 }
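/* Equivalently, dest = (t & c) | (f & ~c) bit by bit: where c is 1
 * the result comes from t, elsewhere from f. E.g. t = 0xffff0000,
 * f = 0x0000ffff, c = 0x00ffff00 gives dest = 0x00ff00ff.
 */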
4671 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4673 switch (size) {
4674 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4675 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4676 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4677 default: abort();
4681 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4683 switch (size) {
4684 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4685 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4686 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4687 default: abort();
4691 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4693 switch (size) {
4694 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4695 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4696 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4697 default: abort();
4701 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4703 switch (size) {
4704 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4705 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4706 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4707 default: abort();
4711 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4712 int q, int u)
4714 if (q) {
4715 if (u) {
4716 switch (size) {
4717 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4718 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4719 default: abort();
4721 } else {
4722 switch (size) {
4723 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4724 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4725 default: abort();
4728 } else {
4729 if (u) {
4730 switch (size) {
4731 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4732 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4733 default: abort();
4735 } else {
4736 switch (size) {
4737 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4738 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4739 default: abort();
4745 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4747 if (u) {
4748 switch (size) {
4749 case 0: gen_helper_neon_widen_u8(dest, src); break;
4750 case 1: gen_helper_neon_widen_u16(dest, src); break;
4751 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4752 default: abort();
4754 } else {
4755 switch (size) {
4756 case 0: gen_helper_neon_widen_s8(dest, src); break;
4757 case 1: gen_helper_neon_widen_s16(dest, src); break;
4758 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4759 default: abort();
4762 tcg_temp_free_i32(src);
4765 static inline void gen_neon_addl(int size)
4767 switch (size) {
4768 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4769 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4770 case 2: tcg_gen_add_i64(CPU_V001); break;
4771 default: abort();
4775 static inline void gen_neon_subl(int size)
4777 switch (size) {
4778 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4779 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4780 case 2: tcg_gen_sub_i64(CPU_V001); break;
4781 default: abort();
4785 static inline void gen_neon_negl(TCGv_i64 var, int size)
4787 switch (size) {
4788 case 0: gen_helper_neon_negl_u16(var, var); break;
4789 case 1: gen_helper_neon_negl_u32(var, var); break;
4790 case 2:
4791 tcg_gen_neg_i64(var, var);
4792 break;
4793 default: abort();
4797 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4799 switch (size) {
4800 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4801 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4802 default: abort();
4806 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4807 int size, int u)
4808 {
4809 TCGv_i64 tmp;
4811 switch ((size << 1) | u) {
4812 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4813 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4814 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4815 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4816 case 4:
4817 tmp = gen_muls_i64_i32(a, b);
4818 tcg_gen_mov_i64(dest, tmp);
4819 tcg_temp_free_i64(tmp);
4820 break;
4821 case 5:
4822 tmp = gen_mulu_i64_i32(a, b);
4823 tcg_gen_mov_i64(dest, tmp);
4824 tcg_temp_free_i64(tmp);
4825 break;
4826 default: abort();
4827 }
4829 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4830 Don't forget to clean them now. */
4831 if (size < 2) {
4832 tcg_temp_free_i32(a);
4833 tcg_temp_free_i32(b);
4834 }
4835 }
4837 static void gen_neon_narrow_op(int op, int u, int size,
4838 TCGv_i32 dest, TCGv_i64 src)
4839 {
4840 if (op) {
4841 if (u) {
4842 gen_neon_unarrow_sats(size, dest, src);
4843 } else {
4844 gen_neon_narrow(size, dest, src);
4845 }
4846 } else {
4847 if (u) {
4848 gen_neon_narrow_satu(size, dest, src);
4849 } else {
4850 gen_neon_narrow_sats(size, dest, src);
4851 }
4852 }
4853 }
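/* Mapping of (op, u) implied above: op=0,u=0 signed saturating;
 * op=0,u=1 unsigned saturating; op=1,u=0 plain truncation;
 * op=1,u=1 signed-to-unsigned saturating.
 */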
4855 /* Symbolic constants for op fields for Neon 3-register same-length.
4856 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4857 * table A7-9.
4858 */
4859 #define NEON_3R_VHADD 0
4860 #define NEON_3R_VQADD 1
4861 #define NEON_3R_VRHADD 2
4862 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4863 #define NEON_3R_VHSUB 4
4864 #define NEON_3R_VQSUB 5
4865 #define NEON_3R_VCGT 6
4866 #define NEON_3R_VCGE 7
4867 #define NEON_3R_VSHL 8
4868 #define NEON_3R_VQSHL 9
4869 #define NEON_3R_VRSHL 10
4870 #define NEON_3R_VQRSHL 11
4871 #define NEON_3R_VMAX 12
4872 #define NEON_3R_VMIN 13
4873 #define NEON_3R_VABD 14
4874 #define NEON_3R_VABA 15
4875 #define NEON_3R_VADD_VSUB 16
4876 #define NEON_3R_VTST_VCEQ 17
4877 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4878 #define NEON_3R_VMUL 19
4879 #define NEON_3R_VPMAX 20
4880 #define NEON_3R_VPMIN 21
4881 #define NEON_3R_VQDMULH_VQRDMULH 22
4882 #define NEON_3R_VPADD 23
4883 #define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
4884 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4885 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4886 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4887 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4888 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4889 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4890 #define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
4892 static const uint8_t neon_3r_sizes[] = {
4893 [NEON_3R_VHADD] = 0x7,
4894 [NEON_3R_VQADD] = 0xf,
4895 [NEON_3R_VRHADD] = 0x7,
4896 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4897 [NEON_3R_VHSUB] = 0x7,
4898 [NEON_3R_VQSUB] = 0xf,
4899 [NEON_3R_VCGT] = 0x7,
4900 [NEON_3R_VCGE] = 0x7,
4901 [NEON_3R_VSHL] = 0xf,
4902 [NEON_3R_VQSHL] = 0xf,
4903 [NEON_3R_VRSHL] = 0xf,
4904 [NEON_3R_VQRSHL] = 0xf,
4905 [NEON_3R_VMAX] = 0x7,
4906 [NEON_3R_VMIN] = 0x7,
4907 [NEON_3R_VABD] = 0x7,
4908 [NEON_3R_VABA] = 0x7,
4909 [NEON_3R_VADD_VSUB] = 0xf,
4910 [NEON_3R_VTST_VCEQ] = 0x7,
4911 [NEON_3R_VML] = 0x7,
4912 [NEON_3R_VMUL] = 0x7,
4913 [NEON_3R_VPMAX] = 0x7,
4914 [NEON_3R_VPMIN] = 0x7,
4915 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4916 [NEON_3R_VPADD] = 0x7,
4917 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
4918 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4919 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4920 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4921 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4922 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4923 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4924 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
4927 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4928 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4929 * table A7-13.
4930 */
4931 #define NEON_2RM_VREV64 0
4932 #define NEON_2RM_VREV32 1
4933 #define NEON_2RM_VREV16 2
4934 #define NEON_2RM_VPADDL 4
4935 #define NEON_2RM_VPADDL_U 5
4936 #define NEON_2RM_AESE 6 /* Includes AESD */
4937 #define NEON_2RM_AESMC 7 /* Includes AESIMC */
4938 #define NEON_2RM_VCLS 8
4939 #define NEON_2RM_VCLZ 9
4940 #define NEON_2RM_VCNT 10
4941 #define NEON_2RM_VMVN 11
4942 #define NEON_2RM_VPADAL 12
4943 #define NEON_2RM_VPADAL_U 13
4944 #define NEON_2RM_VQABS 14
4945 #define NEON_2RM_VQNEG 15
4946 #define NEON_2RM_VCGT0 16
4947 #define NEON_2RM_VCGE0 17
4948 #define NEON_2RM_VCEQ0 18
4949 #define NEON_2RM_VCLE0 19
4950 #define NEON_2RM_VCLT0 20
4951 #define NEON_2RM_SHA1H 21
4952 #define NEON_2RM_VABS 22
4953 #define NEON_2RM_VNEG 23
4954 #define NEON_2RM_VCGT0_F 24
4955 #define NEON_2RM_VCGE0_F 25
4956 #define NEON_2RM_VCEQ0_F 26
4957 #define NEON_2RM_VCLE0_F 27
4958 #define NEON_2RM_VCLT0_F 28
4959 #define NEON_2RM_VABS_F 30
4960 #define NEON_2RM_VNEG_F 31
4961 #define NEON_2RM_VSWP 32
4962 #define NEON_2RM_VTRN 33
4963 #define NEON_2RM_VUZP 34
4964 #define NEON_2RM_VZIP 35
4965 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4966 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4967 #define NEON_2RM_VSHLL 38
4968 #define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
4969 #define NEON_2RM_VRINTN 40
4970 #define NEON_2RM_VRINTX 41
4971 #define NEON_2RM_VRINTA 42
4972 #define NEON_2RM_VRINTZ 43
4973 #define NEON_2RM_VCVT_F16_F32 44
4974 #define NEON_2RM_VRINTM 45
4975 #define NEON_2RM_VCVT_F32_F16 46
4976 #define NEON_2RM_VRINTP 47
4977 #define NEON_2RM_VCVTAU 48
4978 #define NEON_2RM_VCVTAS 49
4979 #define NEON_2RM_VCVTNU 50
4980 #define NEON_2RM_VCVTNS 51
4981 #define NEON_2RM_VCVTPU 52
4982 #define NEON_2RM_VCVTPS 53
4983 #define NEON_2RM_VCVTMU 54
4984 #define NEON_2RM_VCVTMS 55
4985 #define NEON_2RM_VRECPE 56
4986 #define NEON_2RM_VRSQRTE 57
4987 #define NEON_2RM_VRECPE_F 58
4988 #define NEON_2RM_VRSQRTE_F 59
4989 #define NEON_2RM_VCVT_FS 60
4990 #define NEON_2RM_VCVT_FU 61
4991 #define NEON_2RM_VCVT_SF 62
4992 #define NEON_2RM_VCVT_UF 63
4994 static int neon_2rm_is_float_op(int op)
4995 {
4996 /* Return true if this neon 2reg-misc op is float-to-float */
4997 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4998 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
4999 op == NEON_2RM_VRINTM ||
5000 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
5001 op >= NEON_2RM_VRECPE_F);
5002 }
5004 /* Each entry in this array has bit n set if the insn allows
5005 * size value n (otherwise it will UNDEF). Since unallocated
5006 * op values will have no bits set they always UNDEF.
5007 */
5008 static const uint8_t neon_2rm_sizes[] = {
5009 [NEON_2RM_VREV64] = 0x7,
5010 [NEON_2RM_VREV32] = 0x3,
5011 [NEON_2RM_VREV16] = 0x1,
5012 [NEON_2RM_VPADDL] = 0x7,
5013 [NEON_2RM_VPADDL_U] = 0x7,
5014 [NEON_2RM_AESE] = 0x1,
5015 [NEON_2RM_AESMC] = 0x1,
5016 [NEON_2RM_VCLS] = 0x7,
5017 [NEON_2RM_VCLZ] = 0x7,
5018 [NEON_2RM_VCNT] = 0x1,
5019 [NEON_2RM_VMVN] = 0x1,
5020 [NEON_2RM_VPADAL] = 0x7,
5021 [NEON_2RM_VPADAL_U] = 0x7,
5022 [NEON_2RM_VQABS] = 0x7,
5023 [NEON_2RM_VQNEG] = 0x7,
5024 [NEON_2RM_VCGT0] = 0x7,
5025 [NEON_2RM_VCGE0] = 0x7,
5026 [NEON_2RM_VCEQ0] = 0x7,
5027 [NEON_2RM_VCLE0] = 0x7,
5028 [NEON_2RM_VCLT0] = 0x7,
5029 [NEON_2RM_SHA1H] = 0x4,
5030 [NEON_2RM_VABS] = 0x7,
5031 [NEON_2RM_VNEG] = 0x7,
5032 [NEON_2RM_VCGT0_F] = 0x4,
5033 [NEON_2RM_VCGE0_F] = 0x4,
5034 [NEON_2RM_VCEQ0_F] = 0x4,
5035 [NEON_2RM_VCLE0_F] = 0x4,
5036 [NEON_2RM_VCLT0_F] = 0x4,
5037 [NEON_2RM_VABS_F] = 0x4,
5038 [NEON_2RM_VNEG_F] = 0x4,
5039 [NEON_2RM_VSWP] = 0x1,
5040 [NEON_2RM_VTRN] = 0x7,
5041 [NEON_2RM_VUZP] = 0x7,
5042 [NEON_2RM_VZIP] = 0x7,
5043 [NEON_2RM_VMOVN] = 0x7,
5044 [NEON_2RM_VQMOVN] = 0x7,
5045 [NEON_2RM_VSHLL] = 0x7,
5046 [NEON_2RM_SHA1SU1] = 0x4,
5047 [NEON_2RM_VRINTN] = 0x4,
5048 [NEON_2RM_VRINTX] = 0x4,
5049 [NEON_2RM_VRINTA] = 0x4,
5050 [NEON_2RM_VRINTZ] = 0x4,
5051 [NEON_2RM_VCVT_F16_F32] = 0x2,
5052 [NEON_2RM_VRINTM] = 0x4,
5053 [NEON_2RM_VCVT_F32_F16] = 0x2,
5054 [NEON_2RM_VRINTP] = 0x4,
5055 [NEON_2RM_VCVTAU] = 0x4,
5056 [NEON_2RM_VCVTAS] = 0x4,
5057 [NEON_2RM_VCVTNU] = 0x4,
5058 [NEON_2RM_VCVTNS] = 0x4,
5059 [NEON_2RM_VCVTPU] = 0x4,
5060 [NEON_2RM_VCVTPS] = 0x4,
5061 [NEON_2RM_VCVTMU] = 0x4,
5062 [NEON_2RM_VCVTMS] = 0x4,
5063 [NEON_2RM_VRECPE] = 0x4,
5064 [NEON_2RM_VRSQRTE] = 0x4,
5065 [NEON_2RM_VRECPE_F] = 0x4,
5066 [NEON_2RM_VRSQRTE_F] = 0x4,
5067 [NEON_2RM_VCVT_FS] = 0x4,
5068 [NEON_2RM_VCVT_FU] = 0x4,
5069 [NEON_2RM_VCVT_SF] = 0x4,
5070 [NEON_2RM_VCVT_UF] = 0x4,
5071 };
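/* E.g. [NEON_2RM_VREV32] = 0x3 permits sizes 0 and 1 (8- and 16-bit
 * elements) only, and any unallocated op has a zero entry and so
 * always UNDEFs.
 */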
5073 /* Translate a NEON data processing instruction. Return nonzero if the
5074 instruction is invalid.
5075 We process data in a mixture of 32-bit and 64-bit chunks.
5076 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
5078 static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
5079 {
5080 int op;
5081 int q;
5082 int rd, rn, rm;
5083 int size;
5084 int shift;
5085 int pass;
5086 int count;
5087 int pairwise;
5088 int u;
5089 uint32_t imm, mask;
5090 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
5091 TCGv_i64 tmp64;
5093 /* FIXME: this access check should not take precedence over UNDEF
5094 * for invalid encodings; we will generate incorrect syndrome information
5095 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5096 */
5097 if (!s->cpacr_fpen) {
5098 gen_exception_insn(s, 4, EXCP_UDEF,
5099 syn_fp_access_trap(1, 0xe, s->thumb));
5100 return 0;
5101 }
5103 if (!s->vfp_enabled)
5104 return 1;
5105 q = (insn & (1 << 6)) != 0;
5106 u = (insn >> 24) & 1;
5107 VFP_DREG_D(rd, insn);
5108 VFP_DREG_N(rn, insn);
5109 VFP_DREG_M(rm, insn);
5110 size = (insn >> 20) & 3;
5111 if ((insn & (1 << 23)) == 0) {
5112 /* Three register same length. */
5113 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
5114 /* Catch invalid op and bad size combinations: UNDEF */
5115 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5116 return 1;
5118 /* All insns of this form UNDEF for either this condition or the
5119 * superset of cases "Q==1"; we catch the latter later.
5120 */
5121 if (q && ((rd | rn | rm) & 1)) {
5122 return 1;
5123 }
5124 /*
5125 * The SHA-1/SHA-256 3-register instructions require special treatment
5126 * here, as their size field is overloaded as an op type selector, and
5127 * they all consume their input in a single pass.
5128 */
5129 if (op == NEON_3R_SHA) {
5130 if (!q) {
5131 return 1;
5133 if (!u) { /* SHA-1 */
5134 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
5135 return 1;
5137 tmp = tcg_const_i32(rd);
5138 tmp2 = tcg_const_i32(rn);
5139 tmp3 = tcg_const_i32(rm);
5140 tmp4 = tcg_const_i32(size);
5141 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5142 tcg_temp_free_i32(tmp4);
5143 } else { /* SHA-256 */
5144 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
5145 return 1;
5147 tmp = tcg_const_i32(rd);
5148 tmp2 = tcg_const_i32(rn);
5149 tmp3 = tcg_const_i32(rm);
5150 switch (size) {
5151 case 0:
5152 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5153 break;
5154 case 1:
5155 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5156 break;
5157 case 2:
5158 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5159 break;
5162 tcg_temp_free_i32(tmp);
5163 tcg_temp_free_i32(tmp2);
5164 tcg_temp_free_i32(tmp3);
5165 return 0;
5167 if (size == 3 && op != NEON_3R_LOGIC) {
5168 /* 64-bit element instructions. */
5169 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5170 neon_load_reg64(cpu_V0, rn + pass);
5171 neon_load_reg64(cpu_V1, rm + pass);
5172 switch (op) {
5173 case NEON_3R_VQADD:
5174 if (u) {
5175 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5176 cpu_V0, cpu_V1);
5177 } else {
5178 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5179 cpu_V0, cpu_V1);
5181 break;
5182 case NEON_3R_VQSUB:
5183 if (u) {
5184 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5185 cpu_V0, cpu_V1);
5186 } else {
5187 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5188 cpu_V0, cpu_V1);
5190 break;
5191 case NEON_3R_VSHL:
5192 if (u) {
5193 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5194 } else {
5195 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5197 break;
5198 case NEON_3R_VQSHL:
5199 if (u) {
5200 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5201 cpu_V1, cpu_V0);
5202 } else {
5203 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5204 cpu_V1, cpu_V0);
5206 break;
5207 case NEON_3R_VRSHL:
5208 if (u) {
5209 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
5210 } else {
5211 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5213 break;
5214 case NEON_3R_VQRSHL:
5215 if (u) {
5216 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5217 cpu_V1, cpu_V0);
5218 } else {
5219 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5220 cpu_V1, cpu_V0);
5222 break;
5223 case NEON_3R_VADD_VSUB:
5224 if (u) {
5225 tcg_gen_sub_i64(CPU_V001);
5226 } else {
5227 tcg_gen_add_i64(CPU_V001);
5229 break;
5230 default:
5231 abort();
5233 neon_store_reg64(cpu_V0, rd + pass);
5235 return 0;
5237 pairwise = 0;
5238 switch (op) {
5239 case NEON_3R_VSHL:
5240 case NEON_3R_VQSHL:
5241 case NEON_3R_VRSHL:
5242 case NEON_3R_VQRSHL:
5244 int rtmp;
5245 /* Shift instruction operands are reversed. */
5246 rtmp = rn;
5247 rn = rm;
5248 rm = rtmp;
5249 }
5250 break;
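/* VSHL (register) is written Vd = Vm << Vn in the ARM ARM: the data
 * comes from Vm and the per-element shift counts from Vn, so swapping
 * rn and rm lets the common element loop below pass (data, shift) to
 * the helpers in that order.
 */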
5251 case NEON_3R_VPADD:
5252 if (u) {
5253 return 1;
5255 /* Fall through */
5256 case NEON_3R_VPMAX:
5257 case NEON_3R_VPMIN:
5258 pairwise = 1;
5259 break;
5260 case NEON_3R_FLOAT_ARITH:
5261 pairwise = (u && size < 2); /* if VPADD (float) */
5262 break;
5263 case NEON_3R_FLOAT_MINMAX:
5264 pairwise = u; /* if VPMIN/VPMAX (float) */
5265 break;
5266 case NEON_3R_FLOAT_CMP:
5267 if (!u && size) {
5268 /* no encoding for U=0 C=1x */
5269 return 1;
5271 break;
5272 case NEON_3R_FLOAT_ACMP:
5273 if (!u) {
5274 return 1;
5276 break;
5277 case NEON_3R_FLOAT_MISC:
5278 /* VMAXNM/VMINNM in ARMv8 */
5279 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
5280 return 1;
5282 break;
5283 case NEON_3R_VMUL:
5284 if (u && (size != 0)) {
5285 /* UNDEF on invalid size for polynomial subcase */
5286 return 1;
5288 break;
5289 case NEON_3R_VFM:
5290 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
5291 return 1;
5293 break;
5294 default:
5295 break;
5298 if (pairwise && q) {
5299 /* All the pairwise insns UNDEF if Q is set */
5300 return 1;
5303 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5305 if (pairwise) {
5306 /* Pairwise. */
5307 if (pass < 1) {
5308 tmp = neon_load_reg(rn, 0);
5309 tmp2 = neon_load_reg(rn, 1);
5310 } else {
5311 tmp = neon_load_reg(rm, 0);
5312 tmp2 = neon_load_reg(rm, 1);
5314 } else {
5315 /* Elementwise. */
5316 tmp = neon_load_reg(rn, pass);
5317 tmp2 = neon_load_reg(rm, pass);
5319 switch (op) {
5320 case NEON_3R_VHADD:
5321 GEN_NEON_INTEGER_OP(hadd);
5322 break;
5323 case NEON_3R_VQADD:
5324 GEN_NEON_INTEGER_OP_ENV(qadd);
5325 break;
5326 case NEON_3R_VRHADD:
5327 GEN_NEON_INTEGER_OP(rhadd);
5328 break;
5329 case NEON_3R_LOGIC: /* Logic ops. */
5330 switch ((u << 2) | size) {
5331 case 0: /* VAND */
5332 tcg_gen_and_i32(tmp, tmp, tmp2);
5333 break;
5334 case 1: /* BIC */
5335 tcg_gen_andc_i32(tmp, tmp, tmp2);
5336 break;
5337 case 2: /* VORR */
5338 tcg_gen_or_i32(tmp, tmp, tmp2);
5339 break;
5340 case 3: /* VORN */
5341 tcg_gen_orc_i32(tmp, tmp, tmp2);
5342 break;
5343 case 4: /* VEOR */
5344 tcg_gen_xor_i32(tmp, tmp, tmp2);
5345 break;
5346 case 5: /* VBSL */
5347 tmp3 = neon_load_reg(rd, pass);
5348 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
5349 tcg_temp_free_i32(tmp3);
5350 break;
5351 case 6: /* VBIT */
5352 tmp3 = neon_load_reg(rd, pass);
5353 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
5354 tcg_temp_free_i32(tmp3);
5355 break;
5356 case 7: /* VBIF */
5357 tmp3 = neon_load_reg(rd, pass);
5358 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
5359 tcg_temp_free_i32(tmp3);
5360 break;
5362 break;
5363 case NEON_3R_VHSUB:
5364 GEN_NEON_INTEGER_OP(hsub);
5365 break;
5366 case NEON_3R_VQSUB:
5367 GEN_NEON_INTEGER_OP_ENV(qsub);
5368 break;
5369 case NEON_3R_VCGT:
5370 GEN_NEON_INTEGER_OP(cgt);
5371 break;
5372 case NEON_3R_VCGE:
5373 GEN_NEON_INTEGER_OP(cge);
5374 break;
5375 case NEON_3R_VSHL:
5376 GEN_NEON_INTEGER_OP(shl);
5377 break;
5378 case NEON_3R_VQSHL:
5379 GEN_NEON_INTEGER_OP_ENV(qshl);
5380 break;
5381 case NEON_3R_VRSHL:
5382 GEN_NEON_INTEGER_OP(rshl);
5383 break;
5384 case NEON_3R_VQRSHL:
5385 GEN_NEON_INTEGER_OP_ENV(qrshl);
5386 break;
5387 case NEON_3R_VMAX:
5388 GEN_NEON_INTEGER_OP(max);
5389 break;
5390 case NEON_3R_VMIN:
5391 GEN_NEON_INTEGER_OP(min);
5392 break;
5393 case NEON_3R_VABD:
5394 GEN_NEON_INTEGER_OP(abd);
5395 break;
5396 case NEON_3R_VABA:
5397 GEN_NEON_INTEGER_OP(abd);
5398 tcg_temp_free_i32(tmp2);
5399 tmp2 = neon_load_reg(rd, pass);
5400 gen_neon_add(size, tmp, tmp2);
5401 break;
5402 case NEON_3R_VADD_VSUB:
5403 if (!u) { /* VADD */
5404 gen_neon_add(size, tmp, tmp2);
5405 } else { /* VSUB */
5406 switch (size) {
5407 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5408 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5409 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
5410 default: abort();
5413 break;
5414 case NEON_3R_VTST_VCEQ:
5415 if (!u) { /* VTST */
5416 switch (size) {
5417 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5418 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5419 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
5420 default: abort();
5422 } else { /* VCEQ */
5423 switch (size) {
5424 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5425 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5426 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5427 default: abort();
5430 break;
5431 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
5432 switch (size) {
5433 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5434 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5435 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5436 default: abort();
5438 tcg_temp_free_i32(tmp2);
5439 tmp2 = neon_load_reg(rd, pass);
5440 if (u) { /* VMLS */
5441 gen_neon_rsb(size, tmp, tmp2);
5442 } else { /* VMLA */
5443 gen_neon_add(size, tmp, tmp2);
5445 break;
5446 case NEON_3R_VMUL:
5447 if (u) { /* polynomial */
5448 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
5449 } else { /* Integer */
5450 switch (size) {
5451 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5452 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5453 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5454 default: abort();
5457 break;
5458 case NEON_3R_VPMAX:
5459 GEN_NEON_INTEGER_OP(pmax);
5460 break;
5461 case NEON_3R_VPMIN:
5462 GEN_NEON_INTEGER_OP(pmin);
5463 break;
5464 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
5465 if (!u) { /* VQDMULH */
5466 switch (size) {
5467 case 1:
5468 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5469 break;
5470 case 2:
5471 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5472 break;
5473 default: abort();
5475 } else { /* VQRDMULH */
5476 switch (size) {
5477 case 1:
5478 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5479 break;
5480 case 2:
5481 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5482 break;
5483 default: abort();
5486 break;
5487 case NEON_3R_VPADD:
5488 switch (size) {
5489 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5490 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5491 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
5492 default: abort();
5494 break;
5495 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
5497 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5498 switch ((u << 2) | size) {
5499 case 0: /* VADD */
5500 case 4: /* VPADD */
5501 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5502 break;
5503 case 2: /* VSUB */
5504 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
5505 break;
5506 case 6: /* VABD */
5507 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
5508 break;
5509 default:
5510 abort();
5512 tcg_temp_free_ptr(fpstatus);
5513 break;
5515 case NEON_3R_FLOAT_MULTIPLY:
5517 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5518 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5519 if (!u) {
5520 tcg_temp_free_i32(tmp2);
5521 tmp2 = neon_load_reg(rd, pass);
5522 if (size == 0) {
5523 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5524 } else {
5525 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5528 tcg_temp_free_ptr(fpstatus);
5529 break;
5531 case NEON_3R_FLOAT_CMP:
5533 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5534 if (!u) {
5535 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
5536 } else {
5537 if (size == 0) {
5538 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5539 } else {
5540 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5543 tcg_temp_free_ptr(fpstatus);
5544 break;
5546 case NEON_3R_FLOAT_ACMP:
5548 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5549 if (size == 0) {
5550 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5551 } else {
5552 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5554 tcg_temp_free_ptr(fpstatus);
5555 break;
5557 case NEON_3R_FLOAT_MINMAX:
5559 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5560 if (size == 0) {
5561 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
5562 } else {
5563 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
5565 tcg_temp_free_ptr(fpstatus);
5566 break;
5568 case NEON_3R_FLOAT_MISC:
5569 if (u) {
5570 /* VMAXNM/VMINNM */
5571 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5572 if (size == 0) {
5573 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
5574 } else {
5575 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
5577 tcg_temp_free_ptr(fpstatus);
5578 } else {
5579 if (size == 0) {
5580 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5581 } else {
5582 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5585 break;
5586 case NEON_3R_VFM:
5588 /* VFMA, VFMS: fused multiply-add */
5589 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5590 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5591 if (size) {
5592 /* VFMS */
5593 gen_helper_vfp_negs(tmp, tmp);
5595 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5596 tcg_temp_free_i32(tmp3);
5597 tcg_temp_free_ptr(fpstatus);
5598 break;
5600 default:
5601 abort();
5603 tcg_temp_free_i32(tmp2);
5605 /* Save the result. For elementwise operations we can put it
5606 straight into the destination register. For pairwise operations
5607 we have to be careful to avoid clobbering the source operands. */
5608 if (pairwise && rd == rm) {
5609 neon_store_scratch(pass, tmp);
5610 } else {
5611 neon_store_reg(rd, pass, tmp);
5614 } /* for pass */
5615 if (pairwise && rd == rm) {
5616 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5617 tmp = neon_load_scratch(pass);
5618 neon_store_reg(rd, pass, tmp);
5621 /* End of 3 register same size operations. */
5622 } else if (insn & (1 << 4)) {
5623 if ((insn & 0x00380080) != 0) {
5624 /* Two registers and shift. */
5625 op = (insn >> 8) & 0xf;
5626 if (insn & (1 << 7)) {
5627 /* 64-bit shift. */
5628 if (op > 7) {
5629 return 1;
5631 size = 3;
5632 } else {
5633 size = 2;
5634 while ((insn & (1 << (size + 19))) == 0)
5635 size--;
5637 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5638 /* To avoid excessive duplication of ops we implement shift
5639 by immediate using the variable shift operations. */
5640 if (op < 8) {
5641 /* Shift by immediate:
5642 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5643 if (q && ((rd | rm) & 1)) {
5644 return 1;
5646 if (!u && (op == 4 || op == 6)) {
5647 return 1;
5649 /* Right shifts are encoded as N - shift, where N is the
5650 element size in bits. */
5651 if (op <= 4)
5652 shift = shift - (1 << (size + 3));
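/* Worked example: VSHR.S8 with imm6 = 0b001101 leaves shift = 5 in
 * the field above; 5 - 8 = -3 then requests an arithmetic right shift
 * by 3, matching the ARM ARM's 16 - imm6.
 */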
5653 if (size == 3) {
5654 count = q + 1;
5655 } else {
5656 count = q ? 4 : 2;
5658 switch (size) {
5659 case 0:
5660 imm = (uint8_t) shift;
5661 imm |= imm << 8;
5662 imm |= imm << 16;
5663 break;
5664 case 1:
5665 imm = (uint16_t) shift;
5666 imm |= imm << 16;
5667 break;
5668 case 2:
5669 case 3:
5670 imm = shift;
5671 break;
5672 default:
5673 abort();
5676 for (pass = 0; pass < count; pass++) {
5677 if (size == 3) {
5678 neon_load_reg64(cpu_V0, rm + pass);
5679 tcg_gen_movi_i64(cpu_V1, imm);
5680 switch (op) {
5681 case 0: /* VSHR */
5682 case 1: /* VSRA */
5683 if (u)
5684 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5685 else
5686 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5687 break;
5688 case 2: /* VRSHR */
5689 case 3: /* VRSRA */
5690 if (u)
5691 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5692 else
5693 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5694 break;
5695 case 4: /* VSRI */
5696 case 5: /* VSHL, VSLI */
5697 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5698 break;
5699 case 6: /* VQSHLU */
5700 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5701 cpu_V0, cpu_V1);
5702 break;
5703 case 7: /* VQSHL */
5704 if (u) {
5705 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5706 cpu_V0, cpu_V1);
5707 } else {
5708 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5709 cpu_V0, cpu_V1);
5711 break;
5713 if (op == 1 || op == 3) {
5714 /* Accumulate. */
5715 neon_load_reg64(cpu_V1, rd + pass);
5716 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5717 } else if (op == 4 || (op == 5 && u)) {
5718 /* Insert */
5719 neon_load_reg64(cpu_V1, rd + pass);
5720 uint64_t mask;
5721 if (shift < -63 || shift > 63) {
5722 mask = 0;
5723 } else {
5724 if (op == 4) {
5725 mask = 0xffffffffffffffffull >> -shift;
5726 } else {
5727 mask = 0xffffffffffffffffull << shift;
5730 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5731 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5732 }
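/* E.g. VSRI.64 by 8 uses mask = 0xffffffffffffffff >> 8, so the top
 * eight bits of the destination survive and the rest are taken from
 * the right-shifted source.
 */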
5733 neon_store_reg64(cpu_V0, rd + pass);
5734 } else { /* size < 3 */
5735 /* Operands in T0 and T1. */
5736 tmp = neon_load_reg(rm, pass);
5737 tmp2 = tcg_temp_new_i32();
5738 tcg_gen_movi_i32(tmp2, imm);
5739 switch (op) {
5740 case 0: /* VSHR */
5741 case 1: /* VSRA */
5742 GEN_NEON_INTEGER_OP(shl);
5743 break;
5744 case 2: /* VRSHR */
5745 case 3: /* VRSRA */
5746 GEN_NEON_INTEGER_OP(rshl);
5747 break;
5748 case 4: /* VSRI */
5749 case 5: /* VSHL, VSLI */
5750 switch (size) {
5751 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5752 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5753 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5754 default: abort();
5756 break;
5757 case 6: /* VQSHLU */
5758 switch (size) {
5759 case 0:
5760 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5761 tmp, tmp2);
5762 break;
5763 case 1:
5764 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5765 tmp, tmp2);
5766 break;
5767 case 2:
5768 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5769 tmp, tmp2);
5770 break;
5771 default:
5772 abort();
5774 break;
5775 case 7: /* VQSHL */
5776 GEN_NEON_INTEGER_OP_ENV(qshl);
5777 break;
5779 tcg_temp_free_i32(tmp2);
5781 if (op == 1 || op == 3) {
5782 /* Accumulate. */
5783 tmp2 = neon_load_reg(rd, pass);
5784 gen_neon_add(size, tmp, tmp2);
5785 tcg_temp_free_i32(tmp2);
5786 } else if (op == 4 || (op == 5 && u)) {
5787 /* Insert */
5788 switch (size) {
5789 case 0:
5790 if (op == 4)
5791 mask = 0xff >> -shift;
5792 else
5793 mask = (uint8_t)(0xff << shift);
5794 mask |= mask << 8;
5795 mask |= mask << 16;
5796 break;
5797 case 1:
5798 if (op == 4)
5799 mask = 0xffff >> -shift;
5800 else
5801 mask = (uint16_t)(0xffff << shift);
5802 mask |= mask << 16;
5803 break;
5804 case 2:
5805 if (shift < -31 || shift > 31) {
5806 mask = 0;
5807 } else {
5808 if (op == 4)
5809 mask = 0xffffffffu >> -shift;
5810 else
5811 mask = 0xffffffffu << shift;
5813 break;
5814 default:
5815 abort();
5817 tmp2 = neon_load_reg(rd, pass);
5818 tcg_gen_andi_i32(tmp, tmp, mask);
5819 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5820 tcg_gen_or_i32(tmp, tmp, tmp2);
5821 tcg_temp_free_i32(tmp2);
5823 neon_store_reg(rd, pass, tmp);
5825 } /* for pass */
5826 } else if (op < 10) {
5827 /* Shift by immediate and narrow:
5828 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5829 int input_unsigned = (op == 8) ? !u : u;
5830 if (rm & 1) {
5831 return 1;
5833 shift = shift - (1 << (size + 3));
5834 size++;
5835 if (size == 3) {
5836 tmp64 = tcg_const_i64(shift);
5837 neon_load_reg64(cpu_V0, rm);
5838 neon_load_reg64(cpu_V1, rm + 1);
5839 for (pass = 0; pass < 2; pass++) {
5840 TCGv_i64 in;
5841 if (pass == 0) {
5842 in = cpu_V0;
5843 } else {
5844 in = cpu_V1;
5846 if (q) {
5847 if (input_unsigned) {
5848 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5849 } else {
5850 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5852 } else {
5853 if (input_unsigned) {
5854 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5855 } else {
5856 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5859 tmp = tcg_temp_new_i32();
5860 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5861 neon_store_reg(rd, pass, tmp);
5862 } /* for pass */
5863 tcg_temp_free_i64(tmp64);
5864 } else {
5865 if (size == 1) {
5866 imm = (uint16_t)shift;
5867 imm |= imm << 16;
5868 } else {
5869 /* size == 2 */
5870 imm = (uint32_t)shift;
5872 tmp2 = tcg_const_i32(imm);
5873 tmp4 = neon_load_reg(rm + 1, 0);
5874 tmp5 = neon_load_reg(rm + 1, 1);
5875 for (pass = 0; pass < 2; pass++) {
5876 if (pass == 0) {
5877 tmp = neon_load_reg(rm, 0);
5878 } else {
5879 tmp = tmp4;
5881 gen_neon_shift_narrow(size, tmp, tmp2, q,
5882 input_unsigned);
5883 if (pass == 0) {
5884 tmp3 = neon_load_reg(rm, 1);
5885 } else {
5886 tmp3 = tmp5;
5888 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5889 input_unsigned);
5890 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5891 tcg_temp_free_i32(tmp);
5892 tcg_temp_free_i32(tmp3);
5893 tmp = tcg_temp_new_i32();
5894 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5895 neon_store_reg(rd, pass, tmp);
5896 } /* for pass */
5897 tcg_temp_free_i32(tmp2);
5899 } else if (op == 10) {
5900 /* VSHLL, VMOVL */
5901 if (q || (rd & 1)) {
5902 return 1;
5904 tmp = neon_load_reg(rm, 0);
5905 tmp2 = neon_load_reg(rm, 1);
5906 for (pass = 0; pass < 2; pass++) {
5907 if (pass == 1)
5908 tmp = tmp2;
5910 gen_neon_widen(cpu_V0, tmp, size, u);
5912 if (shift != 0) {
5913 /* The shift is less than the width of the source
5914 type, so we can just shift the whole register. */
5915 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5916 /* Widen the result of the shift: we need to clear
5917 * the potential overflow bits resulting from
5918 * left bits of the narrow input appearing as
5919 * right bits of the left neighbour's narrow
5920 * input. */
5921 if (size < 2 || !u) {
5922 uint64_t imm64;
5923 if (size == 0) {
5924 imm = (0xffu >> (8 - shift));
5925 imm |= imm << 16;
5926 } else if (size == 1) {
5927 imm = 0xffff >> (16 - shift);
5928 } else {
5929 /* size == 2 */
5930 imm = 0xffffffff >> (32 - shift);
5932 if (size < 2) {
5933 imm64 = imm | (((uint64_t)imm) << 32);
5934 } else {
5935 imm64 = imm;
5937 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5940 neon_store_reg64(cpu_V0, rd + pass);
5942 } else if (op >= 14) {
5943 /* VCVT fixed-point. */
5944 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5945 return 1;
5947 /* We have already masked out the must-be-1 top bit of imm6,
5948 * hence this 32-shift where the ARM ARM has 64-imm6.
5949 */
5950 shift = 32 - shift;
5951 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5952 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5953 if (!(op & 1)) {
5954 if (u)
5955 gen_vfp_ulto(0, shift, 1);
5956 else
5957 gen_vfp_slto(0, shift, 1);
5958 } else {
5959 if (u)
5960 gen_vfp_toul(0, shift, 1);
5961 else
5962 gen_vfp_tosl(0, shift, 1);
5964 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5966 } else {
5967 return 1;
5969 } else { /* (insn & 0x00380080) == 0 */
5970 int invert;
5971 if (q && (rd & 1)) {
5972 return 1;
5975 op = (insn >> 8) & 0xf;
5976 /* One register and immediate. */
5977 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5978 invert = (insn & (1 << 5)) != 0;
5979 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5980 * We choose to not special-case this and will behave as if a
5981 * valid constant encoding of 0 had been given.
5982 */
5983 switch (op) {
5984 case 0: case 1:
5985 /* no-op */
5986 break;
5987 case 2: case 3:
5988 imm <<= 8;
5989 break;
5990 case 4: case 5:
5991 imm <<= 16;
5992 break;
5993 case 6: case 7:
5994 imm <<= 24;
5995 break;
5996 case 8: case 9:
5997 imm |= imm << 16;
5998 break;
5999 case 10: case 11:
6000 imm = (imm << 8) | (imm << 24);
6001 break;
6002 case 12:
6003 imm = (imm << 8) | 0xff;
6004 break;
6005 case 13:
6006 imm = (imm << 16) | 0xffff;
6007 break;
6008 case 14:
6009 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6010 if (invert)
6011 imm = ~imm;
6012 break;
6013 case 15:
6014 if (invert) {
6015 return 1;
6017 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6018 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6019 break;
6020 }
6021 if (invert)
6022 imm = ~imm;
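/* Expansion example: op 12 ("shifted ones") with imm8 = 0xab yields
 * imm = 0xabff; op 14 replicates imm8 into all four byte lanes.
 */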
6024 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6025 if (op & 1 && op < 12) {
6026 tmp = neon_load_reg(rd, pass);
6027 if (invert) {
6028 /* The immediate value has already been inverted, so
6029 BIC becomes AND. */
6030 tcg_gen_andi_i32(tmp, tmp, imm);
6031 } else {
6032 tcg_gen_ori_i32(tmp, tmp, imm);
6034 } else {
6035 /* VMOV, VMVN. */
6036 tmp = tcg_temp_new_i32();
6037 if (op == 14 && invert) {
6038 int n;
6039 uint32_t val;
6040 val = 0;
6041 for (n = 0; n < 4; n++) {
6042 if (imm & (1 << (n + (pass & 1) * 4)))
6043 val |= 0xff << (n * 8);
6045 tcg_gen_movi_i32(tmp, val);
6046 } else {
6047 tcg_gen_movi_i32(tmp, imm);
6050 neon_store_reg(rd, pass, tmp);
6053 } else { /* (insn & 0x00800010) == 0x00800000 */
6054 if (size != 3) {
6055 op = (insn >> 8) & 0xf;
6056 if ((insn & (1 << 6)) == 0) {
6057 /* Three registers of different lengths. */
6058 int src1_wide;
6059 int src2_wide;
6060 int prewiden;
6061 /* undefreq: bit 0 : UNDEF if size == 0
6062 * bit 1 : UNDEF if size == 1
6063 * bit 2 : UNDEF if size == 2
6064 * bit 3 : UNDEF if U == 1
6065 * Note that [2:0] set implies 'always UNDEF'
6066 */
6067 int undefreq;
6068 /* prewiden, src1_wide, src2_wide, undefreq */
6069 static const int neon_3reg_wide[16][4] = {
6070 {1, 0, 0, 0}, /* VADDL */
6071 {1, 1, 0, 0}, /* VADDW */
6072 {1, 0, 0, 0}, /* VSUBL */
6073 {1, 1, 0, 0}, /* VSUBW */
6074 {0, 1, 1, 0}, /* VADDHN */
6075 {0, 0, 0, 0}, /* VABAL */
6076 {0, 1, 1, 0}, /* VSUBHN */
6077 {0, 0, 0, 0}, /* VABDL */
6078 {0, 0, 0, 0}, /* VMLAL */
6079 {0, 0, 0, 9}, /* VQDMLAL */
6080 {0, 0, 0, 0}, /* VMLSL */
6081 {0, 0, 0, 9}, /* VQDMLSL */
6082 {0, 0, 0, 0}, /* Integer VMULL */
6083 {0, 0, 0, 1}, /* VQDMULL */
6084 {0, 0, 0, 0xa}, /* Polynomial VMULL */
6085 {0, 0, 0, 7}, /* Reserved: always UNDEF */
6086 };
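/* undefreq example: VQDMLAL's 9 (0b1001) means UNDEF for size == 0 or
 * U == 1, and the reserved row's 7 sets bits [2:0], i.e. always UNDEF.
 */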
6088 prewiden = neon_3reg_wide[op][0];
6089 src1_wide = neon_3reg_wide[op][1];
6090 src2_wide = neon_3reg_wide[op][2];
6091 undefreq = neon_3reg_wide[op][3];
6093 if ((undefreq & (1 << size)) ||
6094 ((undefreq & 8) && u)) {
6095 return 1;
6097 if ((src1_wide && (rn & 1)) ||
6098 (src2_wide && (rm & 1)) ||
6099 (!src2_wide && (rd & 1))) {
6100 return 1;
6103 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6104 * outside the loop below as it only performs a single pass.
6105 */
6106 if (op == 14 && size == 2) {
6107 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6109 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
6110 return 1;
6112 tcg_rn = tcg_temp_new_i64();
6113 tcg_rm = tcg_temp_new_i64();
6114 tcg_rd = tcg_temp_new_i64();
6115 neon_load_reg64(tcg_rn, rn);
6116 neon_load_reg64(tcg_rm, rm);
6117 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6118 neon_store_reg64(tcg_rd, rd);
6119 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6120 neon_store_reg64(tcg_rd, rd + 1);
6121 tcg_temp_free_i64(tcg_rn);
6122 tcg_temp_free_i64(tcg_rm);
6123 tcg_temp_free_i64(tcg_rd);
6124 return 0;
6125 }
6127 /* Avoid overlapping operands. Wide source operands are
6128 always aligned so will never overlap with wide
6129 destinations in problematic ways. */
6130 if (rd == rm && !src2_wide) {
6131 tmp = neon_load_reg(rm, 1);
6132 neon_store_scratch(2, tmp);
6133 } else if (rd == rn && !src1_wide) {
6134 tmp = neon_load_reg(rn, 1);
6135 neon_store_scratch(2, tmp);
6137 TCGV_UNUSED_I32(tmp3);
6138 for (pass = 0; pass < 2; pass++) {
6139 if (src1_wide) {
6140 neon_load_reg64(cpu_V0, rn + pass);
6141 TCGV_UNUSED_I32(tmp);
6142 } else {
6143 if (pass == 1 && rd == rn) {
6144 tmp = neon_load_scratch(2);
6145 } else {
6146 tmp = neon_load_reg(rn, pass);
6148 if (prewiden) {
6149 gen_neon_widen(cpu_V0, tmp, size, u);
6152 if (src2_wide) {
6153 neon_load_reg64(cpu_V1, rm + pass);
6154 TCGV_UNUSED_I32(tmp2);
6155 } else {
6156 if (pass == 1 && rd == rm) {
6157 tmp2 = neon_load_scratch(2);
6158 } else {
6159 tmp2 = neon_load_reg(rm, pass);
6161 if (prewiden) {
6162 gen_neon_widen(cpu_V1, tmp2, size, u);
6165 switch (op) {
6166 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
6167 gen_neon_addl(size);
6168 break;
6169 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
6170 gen_neon_subl(size);
6171 break;
6172 case 5: case 7: /* VABAL, VABDL */
6173 switch ((size << 1) | u) {
6174 case 0:
6175 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6176 break;
6177 case 1:
6178 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6179 break;
6180 case 2:
6181 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6182 break;
6183 case 3:
6184 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6185 break;
6186 case 4:
6187 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6188 break;
6189 case 5:
6190 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6191 break;
6192 default: abort();
6194 tcg_temp_free_i32(tmp2);
6195 tcg_temp_free_i32(tmp);
6196 break;
6197 case 8: case 9: case 10: case 11: case 12: case 13:
6198 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
6199 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6200 break;
6201 case 14: /* Polynomial VMULL */
6202 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
6203 tcg_temp_free_i32(tmp2);
6204 tcg_temp_free_i32(tmp);
6205 break;
6206 default: /* 15 is RESERVED: caught earlier */
6207 abort();
6209 if (op == 13) {
6210 /* VQDMULL */
6211 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6212 neon_store_reg64(cpu_V0, rd + pass);
6213 } else if (op == 5 || (op >= 8 && op <= 11)) {
6214 /* Accumulate. */
6215 neon_load_reg64(cpu_V1, rd + pass);
6216 switch (op) {
6217 case 10: /* VMLSL */
6218 gen_neon_negl(cpu_V0, size);
6219 /* Fall through */
6220 case 5: case 8: /* VABAL, VMLAL */
6221 gen_neon_addl(size);
6222 break;
6223 case 9: case 11: /* VQDMLAL, VQDMLSL */
6224 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6225 if (op == 11) {
6226 gen_neon_negl(cpu_V0, size);
6228 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6229 break;
6230 default:
6231 abort();
6233 neon_store_reg64(cpu_V0, rd + pass);
6234 } else if (op == 4 || op == 6) {
6235 /* Narrowing operation. */
6236 tmp = tcg_temp_new_i32();
6237 if (!u) {
6238 switch (size) {
6239 case 0:
6240 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6241 break;
6242 case 1:
6243 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6244 break;
6245 case 2:
6246 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6247 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6248 break;
6249 default: abort();
6251 } else {
6252 switch (size) {
6253 case 0:
6254 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6255 break;
6256 case 1:
6257 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6258 break;
6259 case 2:
6260 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6261 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6262 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6263 break;
6264 default: abort();
6267 if (pass == 0) {
6268 tmp3 = tmp;
6269 } else {
6270 neon_store_reg(rd, 0, tmp3);
6271 neon_store_reg(rd, 1, tmp);
6273 } else {
6274 /* Write back the result. */
6275 neon_store_reg64(cpu_V0, rd + pass);
6278 } else {
6279 /* Two registers and a scalar. NB that for ops of this form
6280 * the ARM ARM labels bit 24 as Q, but it is in our variable
6281 * 'u', not 'q'.
6282 */
6283 if (size == 0) {
6284 return 1;
6286 switch (op) {
6287 case 1: /* Float VMLA scalar */
6288 case 5: /* Floating point VMLS scalar */
6289 case 9: /* Floating point VMUL scalar */
6290 if (size == 1) {
6291 return 1;
6293 /* fall through */
6294 case 0: /* Integer VMLA scalar */
6295 case 4: /* Integer VMLS scalar */
6296 case 8: /* Integer VMUL scalar */
6297 case 12: /* VQDMULH scalar */
6298 case 13: /* VQRDMULH scalar */
6299 if (u && ((rd | rn) & 1)) {
6300 return 1;
6302 tmp = neon_get_scalar(size, rm);
6303 neon_store_scratch(0, tmp);
6304 for (pass = 0; pass < (u ? 4 : 2); pass++) {
6305 tmp = neon_load_scratch(0);
6306 tmp2 = neon_load_reg(rn, pass);
6307 if (op == 12) {
6308 if (size == 1) {
6309 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
6310 } else {
6311 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
6313 } else if (op == 13) {
6314 if (size == 1) {
6315 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
6316 } else {
6317 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6319 } else if (op & 1) {
6320 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6321 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6322 tcg_temp_free_ptr(fpstatus);
6323 } else {
6324 switch (size) {
6325 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6326 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6327 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
6328 default: abort();
6331 tcg_temp_free_i32(tmp2);
6332 if (op < 8) {
6333 /* Accumulate. */
6334 tmp2 = neon_load_reg(rd, pass);
6335 switch (op) {
6336 case 0:
6337 gen_neon_add(size, tmp, tmp2);
6338 break;
6339 case 1:
6341 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6342 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6343 tcg_temp_free_ptr(fpstatus);
6344 break;
6346 case 4:
6347 gen_neon_rsb(size, tmp, tmp2);
6348 break;
6349 case 5:
6351 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6352 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6353 tcg_temp_free_ptr(fpstatus);
6354 break;
6356 default:
6357 abort();
6359 tcg_temp_free_i32(tmp2);
6361 neon_store_reg(rd, pass, tmp);
6363 break;
6364 case 3: /* VQDMLAL scalar */
6365 case 7: /* VQDMLSL scalar */
6366 case 11: /* VQDMULL scalar */
6367 if (u == 1) {
6368 return 1;
6370 /* fall through */
6371 case 2: /* VMLAL scalar */
6372 case 6: /* VMLSL scalar */
6373 case 10: /* VMULL scalar */
6374 if (rd & 1) {
6375 return 1;
6377 tmp2 = neon_get_scalar(size, rm);
6378 /* We need a copy of tmp2 because gen_neon_mull
6379 * deletes it during pass 0. */
6380 tmp4 = tcg_temp_new_i32();
6381 tcg_gen_mov_i32(tmp4, tmp2);
6382 tmp3 = neon_load_reg(rn, 1);
6384 for (pass = 0; pass < 2; pass++) {
6385 if (pass == 0) {
6386 tmp = neon_load_reg(rn, 0);
6387 } else {
6388 tmp = tmp3;
6389 tmp2 = tmp4;
6391 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
6392 if (op != 11) {
6393 neon_load_reg64(cpu_V1, rd + pass);
6395 switch (op) {
6396 case 6:
6397 gen_neon_negl(cpu_V0, size);
6398 /* Fall through */
6399 case 2:
6400 gen_neon_addl(size);
6401 break;
6402 case 3: case 7:
6403 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6404 if (op == 7) {
6405 gen_neon_negl(cpu_V0, size);
6407 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6408 break;
6409 case 10:
6410 /* no-op */
6411 break;
6412 case 11:
6413 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6414 break;
6415 default:
6416 abort();
6418 neon_store_reg64(cpu_V0, rd + pass);
6422 break;
6423 default: /* 14 and 15 are RESERVED */
6424 return 1;
6427 } else { /* size == 3 */
6428 if (!u) {
6429 /* Extract. */
6430 imm = (insn >> 8) & 0xf;
6432 if (imm > 7 && !q)
6433 return 1;
6435 if (q && ((rd | rn | rm) & 1)) {
6436 return 1;
6439 if (imm == 0) {
6440 neon_load_reg64(cpu_V0, rn);
6441 if (q) {
6442 neon_load_reg64(cpu_V1, rn + 1);
6444 } else if (imm == 8) {
6445 neon_load_reg64(cpu_V0, rn + 1);
6446 if (q) {
6447 neon_load_reg64(cpu_V1, rm);
6449 } else if (q) {
6450 tmp64 = tcg_temp_new_i64();
6451 if (imm < 8) {
6452 neon_load_reg64(cpu_V0, rn);
6453 neon_load_reg64(tmp64, rn + 1);
6454 } else {
6455 neon_load_reg64(cpu_V0, rn + 1);
6456 neon_load_reg64(tmp64, rm);
6458 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
6459 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
6460 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6461 if (imm < 8) {
6462 neon_load_reg64(cpu_V1, rm);
6463 } else {
6464 neon_load_reg64(cpu_V1, rm + 1);
6465 imm -= 8;
6467 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6468 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6469 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
6470 tcg_temp_free_i64(tmp64);
6471 } else {
6472 /* BUGFIX */
6473 neon_load_reg64(cpu_V0, rn);
6474 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
6475 neon_load_reg64(cpu_V1, rm);
6476 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
6477 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6479 neon_store_reg64(cpu_V0, rd);
6480 if (q) {
6481 neon_store_reg64(cpu_V1, rd + 1);
6483 } else if ((insn & (1 << 11)) == 0) {
6484 /* Two register misc. */
6485 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6486 size = (insn >> 18) & 3;
6487 /* UNDEF for unknown op values and bad op-size combinations */
6488 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6489 return 1;
6491 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6492 q && ((rm | rd) & 1)) {
6493 return 1;
6495 switch (op) {
6496 case NEON_2RM_VREV64:
6497 for (pass = 0; pass < (q ? 2 : 1); pass++) {
6498 tmp = neon_load_reg(rm, pass * 2);
6499 tmp2 = neon_load_reg(rm, pass * 2 + 1);
6500 switch (size) {
6501 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6502 case 1: gen_swap_half(tmp); break;
6503 case 2: /* no-op */ break;
6504 default: abort();
6506 neon_store_reg(rd, pass * 2 + 1, tmp);
6507 if (size == 2) {
6508 neon_store_reg(rd, pass * 2, tmp2);
6509 } else {
6510 switch (size) {
6511 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6512 case 1: gen_swap_half(tmp2); break;
6513 default: abort();
6515 neon_store_reg(rd, pass * 2, tmp2);
6518 break;
6519 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6520 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
6521 for (pass = 0; pass < q + 1; pass++) {
6522 tmp = neon_load_reg(rm, pass * 2);
6523 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6524 tmp = neon_load_reg(rm, pass * 2 + 1);
6525 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6526 switch (size) {
6527 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6528 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6529 case 2: tcg_gen_add_i64(CPU_V001); break;
6530 default: abort();
6532 if (op >= NEON_2RM_VPADAL) {
6533 /* Accumulate. */
6534 neon_load_reg64(cpu_V1, rd + pass);
6535 gen_neon_addl(size);
6537 neon_store_reg64(cpu_V0, rd + pass);
6539 break;
6540 case NEON_2RM_VTRN:
6541 if (size == 2) {
6542 int n;
6543 for (n = 0; n < (q ? 4 : 2); n += 2) {
6544 tmp = neon_load_reg(rm, n);
6545 tmp2 = neon_load_reg(rd, n + 1);
6546 neon_store_reg(rm, n, tmp2);
6547 neon_store_reg(rd, n + 1, tmp);
6549 } else {
6550 goto elementwise;
6552 break;
6553 case NEON_2RM_VUZP:
6554 if (gen_neon_unzip(rd, rm, size, q)) {
6555 return 1;
6557 break;
6558 case NEON_2RM_VZIP:
6559 if (gen_neon_zip(rd, rm, size, q)) {
6560 return 1;
6562 break;
6563 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6564 /* also VQMOVUN; op field and mnemonics don't line up */
6565 if (rm & 1) {
6566 return 1;
6568 TCGV_UNUSED_I32(tmp2);
6569 for (pass = 0; pass < 2; pass++) {
6570 neon_load_reg64(cpu_V0, rm + pass);
6571 tmp = tcg_temp_new_i32();
6572 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6573 tmp, cpu_V0);
6574 if (pass == 0) {
6575 tmp2 = tmp;
6576 } else {
6577 neon_store_reg(rd, 0, tmp2);
6578 neon_store_reg(rd, 1, tmp);
6581 break;
6582 case NEON_2RM_VSHLL:
6583 if (q || (rd & 1)) {
6584 return 1;
6586 tmp = neon_load_reg(rm, 0);
6587 tmp2 = neon_load_reg(rm, 1);
6588 for (pass = 0; pass < 2; pass++) {
6589 if (pass == 1)
6590 tmp = tmp2;
6591 gen_neon_widen(cpu_V0, tmp, size, 1);
6592 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
6593 neon_store_reg64(cpu_V0, rd + pass);
6595 break;
6596 case NEON_2RM_VCVT_F16_F32:
6597 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6598 q || (rm & 1)) {
6599 return 1;
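/* The four f32 elements of Qm are converted to f16 and packed
 * in pairs, low element in the low halfword, into the two
 * 32-bit lanes of Dd.
 */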
6601 tmp = tcg_temp_new_i32();
6602 tmp2 = tcg_temp_new_i32();
6603 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
6604 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6605 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
6606 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6607 tcg_gen_shli_i32(tmp2, tmp2, 16);
6608 tcg_gen_or_i32(tmp2, tmp2, tmp);
6609 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
6610 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
6611 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6612 neon_store_reg(rd, 0, tmp2);
6613 tmp2 = tcg_temp_new_i32();
6614 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
6615 tcg_gen_shli_i32(tmp2, tmp2, 16);
6616 tcg_gen_or_i32(tmp2, tmp2, tmp);
6617 neon_store_reg(rd, 1, tmp2);
6618 tcg_temp_free_i32(tmp);
6619 break;
6620 case NEON_2RM_VCVT_F32_F16:
6621 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
6622 q || (rd & 1)) {
6623 return 1;
6625 tmp3 = tcg_temp_new_i32();
6626 tmp = neon_load_reg(rm, 0);
6627 tmp2 = neon_load_reg(rm, 1);
6628 tcg_gen_ext16u_i32(tmp3, tmp);
6629 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6630 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6631 tcg_gen_shri_i32(tmp3, tmp, 16);
6632 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6633 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
6634 tcg_temp_free_i32(tmp);
6635 tcg_gen_ext16u_i32(tmp3, tmp2);
6636 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6637 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6638 tcg_gen_shri_i32(tmp3, tmp2, 16);
6639 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
6640 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
6641 tcg_temp_free_i32(tmp2);
6642 tcg_temp_free_i32(tmp3);
6643 break;
6644 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6645 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
6646 || ((rm | rd) & 1)) {
6647 return 1;
6649 tmp = tcg_const_i32(rd);
6650 tmp2 = tcg_const_i32(rm);
6652 /* Bit 6 is the lowest opcode bit; it distinguishes between
6653 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6655 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6657 if (op == NEON_2RM_AESE) {
6658 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6659 } else {
6660 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6662 tcg_temp_free_i32(tmp);
6663 tcg_temp_free_i32(tmp2);
6664 tcg_temp_free_i32(tmp3);
6665 break;
6666 case NEON_2RM_SHA1H:
6667 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
6668 || ((rm | rd) & 1)) {
6669 return 1;
6671 tmp = tcg_const_i32(rd);
6672 tmp2 = tcg_const_i32(rm);
6674 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6676 tcg_temp_free_i32(tmp);
6677 tcg_temp_free_i32(tmp2);
6678 break;
6679 case NEON_2RM_SHA1SU1:
6680 if ((rm | rd) & 1) {
6681 return 1;
6683 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6684 if (q) {
6685 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
6686 return 1;
6688 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
6689 return 1;
6691 tmp = tcg_const_i32(rd);
6692 tmp2 = tcg_const_i32(rm);
6693 if (q) {
6694 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6695 } else {
6696 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6698 tcg_temp_free_i32(tmp);
6699 tcg_temp_free_i32(tmp2);
6700 break;
6701 default:
6702 elementwise:
6703 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6704 if (neon_2rm_is_float_op(op)) {
6705 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6706 neon_reg_offset(rm, pass));
6707 TCGV_UNUSED_I32(tmp);
6708 } else {
6709 tmp = neon_load_reg(rm, pass);
6711 switch (op) {
6712 case NEON_2RM_VREV32:
6713 switch (size) {
6714 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6715 case 1: gen_swap_half(tmp); break;
6716 default: abort();
6718 break;
6719 case NEON_2RM_VREV16:
6720 gen_rev16(tmp);
6721 break;
6722 case NEON_2RM_VCLS:
6723 switch (size) {
6724 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6725 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6726 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6727 default: abort();
6729 break;
6730 case NEON_2RM_VCLZ:
6731 switch (size) {
6732 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6733 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6734 case 2: gen_helper_clz(tmp, tmp); break;
6735 default: abort();
6737 break;
6738 case NEON_2RM_VCNT:
6739 gen_helper_neon_cnt_u8(tmp, tmp);
6740 break;
6741 case NEON_2RM_VMVN:
6742 tcg_gen_not_i32(tmp, tmp);
6743 break;
6744 case NEON_2RM_VQABS:
6745 switch (size) {
6746 case 0:
6747 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6748 break;
6749 case 1:
6750 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6751 break;
6752 case 2:
6753 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6754 break;
6755 default: abort();
6757 break;
6758 case NEON_2RM_VQNEG:
6759 switch (size) {
6760 case 0:
6761 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6762 break;
6763 case 1:
6764 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6765 break;
6766 case 2:
6767 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6768 break;
6769 default: abort();
6771 break;
6772 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6773 tmp2 = tcg_const_i32(0);
6774 switch(size) {
6775 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6776 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6777 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6778 default: abort();
6780 tcg_temp_free_i32(tmp2);
6781 if (op == NEON_2RM_VCLE0) {
6782 tcg_gen_not_i32(tmp, tmp);
6784 break;
6785 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6786 tmp2 = tcg_const_i32(0);
6787 switch(size) {
6788 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6789 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6790 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6791 default: abort();
6793 tcg_temp_free_i32(tmp2);
6794 if (op == NEON_2RM_VCLT0) {
6795 tcg_gen_not_i32(tmp, tmp);
6797 break;
6798 case NEON_2RM_VCEQ0:
6799 tmp2 = tcg_const_i32(0);
6800 switch(size) {
6801 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6802 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6803 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6804 default: abort();
6806 tcg_temp_free_i32(tmp2);
6807 break;
6808 case NEON_2RM_VABS:
6809 switch(size) {
6810 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6811 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6812 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6813 default: abort();
6815 break;
6816 case NEON_2RM_VNEG:
6817 tmp2 = tcg_const_i32(0);
6818 gen_neon_rsb(size, tmp, tmp2);
6819 tcg_temp_free_i32(tmp2);
6820 break;
6821 case NEON_2RM_VCGT0_F:
6823 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6824 tmp2 = tcg_const_i32(0);
6825 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6826 tcg_temp_free_i32(tmp2);
6827 tcg_temp_free_ptr(fpstatus);
6828 break;
6830 case NEON_2RM_VCGE0_F:
6832 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6833 tmp2 = tcg_const_i32(0);
6834 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6835 tcg_temp_free_i32(tmp2);
6836 tcg_temp_free_ptr(fpstatus);
6837 break;
6839 case NEON_2RM_VCEQ0_F:
6841 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6842 tmp2 = tcg_const_i32(0);
6843 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6844 tcg_temp_free_i32(tmp2);
6845 tcg_temp_free_ptr(fpstatus);
6846 break;
6848 case NEON_2RM_VCLE0_F:
6850 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6851 tmp2 = tcg_const_i32(0);
6852 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6853 tcg_temp_free_i32(tmp2);
6854 tcg_temp_free_ptr(fpstatus);
6855 break;
6857 case NEON_2RM_VCLT0_F:
6859 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6860 tmp2 = tcg_const_i32(0);
6861 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6862 tcg_temp_free_i32(tmp2);
6863 tcg_temp_free_ptr(fpstatus);
6864 break;
6866 case NEON_2RM_VABS_F:
6867 gen_vfp_abs(0);
6868 break;
6869 case NEON_2RM_VNEG_F:
6870 gen_vfp_neg(0);
6871 break;
6872 case NEON_2RM_VSWP:
6873 tmp2 = neon_load_reg(rd, pass);
6874 neon_store_reg(rm, pass, tmp2);
6875 break;
6876 case NEON_2RM_VTRN:
6877 tmp2 = neon_load_reg(rd, pass);
6878 switch (size) {
6879 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6880 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6881 default: abort();
6883 neon_store_reg(rm, pass, tmp2);
6884 break;
6885 case NEON_2RM_VRINTN:
6886 case NEON_2RM_VRINTA:
6887 case NEON_2RM_VRINTM:
6888 case NEON_2RM_VRINTP:
6889 case NEON_2RM_VRINTZ:
6891 TCGv_i32 tcg_rmode;
6892 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6893 int rmode;
6895 if (op == NEON_2RM_VRINTZ) {
6896 rmode = FPROUNDING_ZERO;
6897 } else {
6898 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6901 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6902 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6903 cpu_env);
6904 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6905 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6906 cpu_env);
6907 tcg_temp_free_ptr(fpstatus);
6908 tcg_temp_free_i32(tcg_rmode);
6909 break;
6911 case NEON_2RM_VRINTX:
6913 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6914 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6915 tcg_temp_free_ptr(fpstatus);
6916 break;
6918 case NEON_2RM_VCVTAU:
6919 case NEON_2RM_VCVTAS:
6920 case NEON_2RM_VCVTNU:
6921 case NEON_2RM_VCVTNS:
6922 case NEON_2RM_VCVTPU:
6923 case NEON_2RM_VCVTPS:
6924 case NEON_2RM_VCVTMU:
6925 case NEON_2RM_VCVTMS:
6927 bool is_signed = !extract32(insn, 7, 1);
6928 TCGv_ptr fpst = get_fpstatus_ptr(1);
6929 TCGv_i32 tcg_rmode, tcg_shift;
6930 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6932 tcg_shift = tcg_const_i32(0);
6933 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6934 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6935 cpu_env);
6937 if (is_signed) {
6938 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6939 tcg_shift, fpst);
6940 } else {
6941 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6942 tcg_shift, fpst);
6945 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6946 cpu_env);
6947 tcg_temp_free_i32(tcg_rmode);
6948 tcg_temp_free_i32(tcg_shift);
6949 tcg_temp_free_ptr(fpst);
6950 break;
6952 case NEON_2RM_VRECPE:
6954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6955 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6956 tcg_temp_free_ptr(fpstatus);
6957 break;
6959 case NEON_2RM_VRSQRTE:
6961 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6962 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6963 tcg_temp_free_ptr(fpstatus);
6964 break;
6966 case NEON_2RM_VRECPE_F:
6968 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6969 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6970 tcg_temp_free_ptr(fpstatus);
6971 break;
6973 case NEON_2RM_VRSQRTE_F:
6975 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6976 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6977 tcg_temp_free_ptr(fpstatus);
6978 break;
6980 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6981 gen_vfp_sito(0, 1);
6982 break;
6983 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6984 gen_vfp_uito(0, 1);
6985 break;
6986 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6987 gen_vfp_tosiz(0, 1);
6988 break;
6989 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6990 gen_vfp_touiz(0, 1);
6991 break;
6992 default:
6993 /* Reserved op values were caught by the
6994 * neon_2rm_sizes[] check earlier.
6996 abort();
6998 if (neon_2rm_is_float_op(op)) {
6999 tcg_gen_st_f32(cpu_F0s, cpu_env,
7000 neon_reg_offset(rd, pass));
7001 } else {
7002 neon_store_reg(rd, pass, tmp);
7005 break;
7007 } else if ((insn & (1 << 10)) == 0) {
7008 /* VTBL, VTBX. */
7009 int n = ((insn >> 8) & 3) + 1;
7010 if ((rn + n) > 32) {
7011 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7012 * helper function running off the end of the register file.
7014 return 1;
7016 n <<= 3;
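/* VTBL zeroes result bytes whose table index is out of range,
 * while VTBX (bit 6 set) leaves them unchanged; that is why the
 * old destination is passed to the helper only for VTBX and a
 * zero operand is used otherwise.
 */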
7017 if (insn & (1 << 6)) {
7018 tmp = neon_load_reg(rd, 0);
7019 } else {
7020 tmp = tcg_temp_new_i32();
7021 tcg_gen_movi_i32(tmp, 0);
7023 tmp2 = neon_load_reg(rm, 0);
7024 tmp4 = tcg_const_i32(rn);
7025 tmp5 = tcg_const_i32(n);
7026 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7027 tcg_temp_free_i32(tmp);
7028 if (insn & (1 << 6)) {
7029 tmp = neon_load_reg(rd, 1);
7030 } else {
7031 tmp = tcg_temp_new_i32();
7032 tcg_gen_movi_i32(tmp, 0);
7034 tmp3 = neon_load_reg(rm, 1);
7035 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
7036 tcg_temp_free_i32(tmp5);
7037 tcg_temp_free_i32(tmp4);
7038 neon_store_reg(rd, 0, tmp2);
7039 neon_store_reg(rd, 1, tmp3);
7040 tcg_temp_free_i32(tmp);
7041 } else if ((insn & 0x380) == 0) {
7042 /* VDUP */
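/* The scalar size is given by the lowest set bit of insn[18:16]:
 * bit 16 -> byte, bit 17 -> halfword, otherwise word; the bits
 * above it, together with bit 19, give the element index in Dm.
 */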
7043 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7044 return 1;
7046 if (insn & (1 << 19)) {
7047 tmp = neon_load_reg(rm, 1);
7048 } else {
7049 tmp = neon_load_reg(rm, 0);
7051 if (insn & (1 << 16)) {
7052 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
7053 } else if (insn & (1 << 17)) {
7054 if ((insn >> 18) & 1)
7055 gen_neon_dup_high16(tmp);
7056 else
7057 gen_neon_dup_low16(tmp);
7059 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7060 tmp2 = tcg_temp_new_i32();
7061 tcg_gen_mov_i32(tmp2, tmp);
7062 neon_store_reg(rd, pass, tmp2);
7064 tcg_temp_free_i32(tmp);
7065 } else {
7066 return 1;
7070 return 0;
7073 static int disas_coproc_insn(DisasContext *s, uint32_t insn)
7075 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7076 const ARMCPRegInfo *ri;
7078 cpnum = (insn >> 8) & 0xf;
7080 /* First check for coprocessor space used for XScale/iwMMXt insns */
7081 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7082 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7083 return 1;
7085 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7086 return disas_iwmmxt_insn(s, insn);
7087 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7088 return disas_dsp_insn(s, insn);
7090 return 1;
7093 /* Otherwise treat as a generic register access */
7094 is64 = (insn & (1 << 25)) == 0;
7095 if (!is64 && ((insn & (1 << 4)) == 0)) {
7096 /* cdp */
7097 return 1;
7100 crm = insn & 0xf;
7101 if (is64) {
7102 crn = 0;
7103 opc1 = (insn >> 4) & 0xf;
7104 opc2 = 0;
7105 rt2 = (insn >> 16) & 0xf;
7106 } else {
7107 crn = (insn >> 16) & 0xf;
7108 opc1 = (insn >> 21) & 7;
7109 opc2 = (insn >> 5) & 7;
7110 rt2 = 0;
7112 isread = (insn >> 20) & 1;
7113 rt = (insn >> 12) & 0xf;
7115 ri = get_arm_cp_reginfo(s->cp_regs,
7116 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
7117 if (ri) {
7118 /* Check access permissions */
7119 if (!cp_access_ok(s->current_el, ri, isread)) {
7120 return 1;
7123 if (ri->accessfn ||
7124 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
7125 /* Emit code to perform further access permissions checks at
7126 * runtime; this may result in an exception.
7127 * Note that on XScale all cp0..c13 registers do an access check
7128 * call in order to handle c15_cpar.
7130 TCGv_ptr tmpptr;
7131 TCGv_i32 tcg_syn;
7132 uint32_t syndrome;
7134 /* Note that since we are an implementation which takes an
7135 * exception on a trapped conditional instruction only if the
7136 * instruction passes its condition code check, we can take
7137 * advantage of the clause in the ARM ARM that allows us to set
7138 * the COND field in the instruction to 0xE in all cases.
7139 * We could fish the actual condition out of the insn (ARM)
7140 * or the condexec bits (Thumb) but it isn't necessary.
7142 switch (cpnum) {
7143 case 14:
7144 if (is64) {
7145 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7146 isread, s->thumb);
7147 } else {
7148 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7149 rt, isread, s->thumb);
7151 break;
7152 case 15:
7153 if (is64) {
7154 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7155 isread, s->thumb);
7156 } else {
7157 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7158 rt, isread, s->thumb);
7160 break;
7161 default:
7162 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7163 * so this can only happen if this is an ARMv7 or earlier CPU,
7164 * in which case the syndrome information won't actually be
7165 * guest visible.
7167 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
7168 syndrome = syn_uncategorized();
7169 break;
7172 gen_set_pc_im(s, s->pc);
7173 tmpptr = tcg_const_ptr(ri);
7174 tcg_syn = tcg_const_i32(syndrome);
7175 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
7176 tcg_temp_free_ptr(tmpptr);
7177 tcg_temp_free_i32(tcg_syn);
7180 /* Handle special cases first */
7181 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7182 case ARM_CP_NOP:
7183 return 0;
7184 case ARM_CP_WFI:
7185 if (isread) {
7186 return 1;
7188 gen_set_pc_im(s, s->pc);
7189 s->is_jmp = DISAS_WFI;
7190 return 0;
7191 default:
7192 break;
7195 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7196 gen_io_start();
7199 if (isread) {
7200 /* Read */
7201 if (is64) {
7202 TCGv_i64 tmp64;
7203 TCGv_i32 tmp;
7204 if (ri->type & ARM_CP_CONST) {
7205 tmp64 = tcg_const_i64(ri->resetvalue);
7206 } else if (ri->readfn) {
7207 TCGv_ptr tmpptr;
7208 tmp64 = tcg_temp_new_i64();
7209 tmpptr = tcg_const_ptr(ri);
7210 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7211 tcg_temp_free_ptr(tmpptr);
7212 } else {
7213 tmp64 = tcg_temp_new_i64();
7214 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7216 tmp = tcg_temp_new_i32();
7217 tcg_gen_trunc_i64_i32(tmp, tmp64);
7218 store_reg(s, rt, tmp);
7219 tcg_gen_shri_i64(tmp64, tmp64, 32);
7220 tmp = tcg_temp_new_i32();
7221 tcg_gen_trunc_i64_i32(tmp, tmp64);
7222 tcg_temp_free_i64(tmp64);
7223 store_reg(s, rt2, tmp);
7224 } else {
7225 TCGv_i32 tmp;
7226 if (ri->type & ARM_CP_CONST) {
7227 tmp = tcg_const_i32(ri->resetvalue);
7228 } else if (ri->readfn) {
7229 TCGv_ptr tmpptr;
7230 tmp = tcg_temp_new_i32();
7231 tmpptr = tcg_const_ptr(ri);
7232 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7233 tcg_temp_free_ptr(tmpptr);
7234 } else {
7235 tmp = load_cpu_offset(ri->fieldoffset);
7237 if (rt == 15) {
7238 /* A destination register of r15 for a 32-bit load sets
7239 * the condition codes from the high 4 bits of the value
7241 gen_set_nzcv(tmp);
7242 tcg_temp_free_i32(tmp);
7243 } else {
7244 store_reg(s, rt, tmp);
7247 } else {
7248 /* Write */
7249 if (ri->type & ARM_CP_CONST) {
7250 /* If not forbidden by access permissions, treat as WI */
7251 return 0;
7254 if (is64) {
7255 TCGv_i32 tmplo, tmphi;
7256 TCGv_i64 tmp64 = tcg_temp_new_i64();
7257 tmplo = load_reg(s, rt);
7258 tmphi = load_reg(s, rt2);
7259 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7260 tcg_temp_free_i32(tmplo);
7261 tcg_temp_free_i32(tmphi);
7262 if (ri->writefn) {
7263 TCGv_ptr tmpptr = tcg_const_ptr(ri);
7264 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7265 tcg_temp_free_ptr(tmpptr);
7266 } else {
7267 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7269 tcg_temp_free_i64(tmp64);
7270 } else {
7271 if (ri->writefn) {
7272 TCGv_i32 tmp;
7273 TCGv_ptr tmpptr;
7274 tmp = load_reg(s, rt);
7275 tmpptr = tcg_const_ptr(ri);
7276 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7277 tcg_temp_free_ptr(tmpptr);
7278 tcg_temp_free_i32(tmp);
7279 } else {
7280 TCGv_i32 tmp = load_reg(s, rt);
7281 store_cpu_offset(tmp, ri->fieldoffset);
7286 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
7287 /* I/O operations must end the TB here (whether read or write) */
7288 gen_io_end();
7289 gen_lookup_tb(s);
7290 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
7291 /* We default to ending the TB on a coprocessor register write,
7292 * but allow this to be suppressed by the register definition
7293 * (usually only necessary to work around guest bugs).
7295 gen_lookup_tb(s);
7298 return 0;
7301 /* Unknown register; this might be a guest error or a QEMU
7302 * unimplemented feature.
7304 if (is64) {
7305 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7306 "64 bit system register cp:%d opc1: %d crm:%d "
7307 "(%s)\n",
7308 isread ? "read" : "write", cpnum, opc1, crm,
7309 s->ns ? "non-secure" : "secure");
7310 } else {
7311 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7312 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7313 "(%s)\n",
7314 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7315 s->ns ? "non-secure" : "secure");
7318 return 1;
7322 /* Store a 64-bit value to a register pair. Clobbers val. */
7323 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
7325 TCGv_i32 tmp;
7326 tmp = tcg_temp_new_i32();
7327 tcg_gen_trunc_i64_i32(tmp, val);
7328 store_reg(s, rlow, tmp);
7329 tmp = tcg_temp_new_i32();
7330 tcg_gen_shri_i64(val, val, 32);
7331 tcg_gen_trunc_i64_i32(tmp, val);
7332 store_reg(s, rhigh, tmp);
7336 /* Load a 32-bit value from a register and perform a 64-bit accumulate. */
7336 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
7338 TCGv_i64 tmp;
7339 TCGv_i32 tmp2;
7341 /* Load value and extend to 64 bits. */
7342 tmp = tcg_temp_new_i64();
7343 tmp2 = load_reg(s, rlow);
7344 tcg_gen_extu_i32_i64(tmp, tmp2);
7345 tcg_temp_free_i32(tmp2);
7346 tcg_gen_add_i64(val, val, tmp);
7347 tcg_temp_free_i64(tmp);
7351 /* Load and add a 64-bit value from a register pair. */
7351 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
7353 TCGv_i64 tmp;
7354 TCGv_i32 tmpl;
7355 TCGv_i32 tmph;
7357 /* Load 64-bit value rd:rn. */
7358 tmpl = load_reg(s, rlow);
7359 tmph = load_reg(s, rhigh);
7360 tmp = tcg_temp_new_i64();
7361 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7362 tcg_temp_free_i32(tmpl);
7363 tcg_temp_free_i32(tmph);
7364 tcg_gen_add_i64(val, val, tmp);
7365 tcg_temp_free_i64(tmp);
7368 /* Set N and Z flags from hi|lo. */
7369 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
7371 tcg_gen_mov_i32(cpu_NF, hi);
7372 tcg_gen_or_i32(cpu_ZF, lo, hi);
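/* In this target's flag representation the Z flag is set iff
 * cpu_ZF == 0, so or-ing the two halves suffices; bit 31 of
 * cpu_NF holds the N flag.
 */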
7375 /* Load/Store exclusive instructions are implemented by remembering
7376 the value/address loaded, and seeing if these are the same
7377 when the store is performed. This should be sufficient to implement
7378 the architecturally mandated semantics, and avoids having to monitor
7379 regular stores.
7381 In system emulation mode only one CPU will be running at once, so
7382 this sequence is effectively atomic. In user emulation mode we
7383 throw an exception and handle the atomic operation elsewhere. */
7384 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
7385 TCGv_i32 addr, int size)
7387 TCGv_i32 tmp = tcg_temp_new_i32();
7389 s->is_ldex = true;
7391 switch (size) {
7392 case 0:
7393 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7394 break;
7395 case 1:
7396 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7397 break;
7398 case 2:
7399 case 3:
7400 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7401 break;
7402 default:
7403 abort();
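/* For LDREXD the second word is loaded here and the pair is
 * recorded as one 64-bit exclusive value, so the matching
 * STREXD can compare both halves at once.
 */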
7406 if (size == 3) {
7407 TCGv_i32 tmp2 = tcg_temp_new_i32();
7408 TCGv_i32 tmp3 = tcg_temp_new_i32();
7410 tcg_gen_addi_i32(tmp2, addr, 4);
7411 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7412 tcg_temp_free_i32(tmp2);
7413 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7414 store_reg(s, rt2, tmp3);
7415 } else {
7416 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
7419 store_reg(s, rt, tmp);
7420 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
7423 static void gen_clrex(DisasContext *s)
7425 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7428 #ifdef CONFIG_USER_ONLY
7429 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7430 TCGv_i32 addr, int size)
7432 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
7433 tcg_gen_movi_i32(cpu_exclusive_info,
7434 size | (rd << 4) | (rt << 8) | (rt2 << 12));
7435 gen_exception_internal_insn(s, 4, EXCP_STREX);
7437 #else
7438 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
7439 TCGv_i32 addr, int size)
7441 TCGv_i32 tmp;
7442 TCGv_i64 val64, extaddr;
7443 TCGLabel *done_label;
7444 TCGLabel *fail_label;
7446 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7447 [addr] = {Rt};
7448 {Rd} = 0;
7449 } else {
7450 {Rd} = 1;
7451 } */
7452 fail_label = gen_new_label();
7453 done_label = gen_new_label();
7454 extaddr = tcg_temp_new_i64();
7455 tcg_gen_extu_i32_i64(extaddr, addr);
7456 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7457 tcg_temp_free_i64(extaddr);
7459 tmp = tcg_temp_new_i32();
7460 switch (size) {
7461 case 0:
7462 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
7463 break;
7464 case 1:
7465 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
7466 break;
7467 case 2:
7468 case 3:
7469 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7470 break;
7471 default:
7472 abort();
7475 val64 = tcg_temp_new_i64();
7476 if (size == 3) {
7477 TCGv_i32 tmp2 = tcg_temp_new_i32();
7478 TCGv_i32 tmp3 = tcg_temp_new_i32();
7479 tcg_gen_addi_i32(tmp2, addr, 4);
7480 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7481 tcg_temp_free_i32(tmp2);
7482 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7483 tcg_temp_free_i32(tmp3);
7484 } else {
7485 tcg_gen_extu_i32_i64(val64, tmp);
7487 tcg_temp_free_i32(tmp);
7489 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7490 tcg_temp_free_i64(val64);
7492 tmp = load_reg(s, rt);
7493 switch (size) {
7494 case 0:
7495 gen_aa32_st8(tmp, addr, get_mem_index(s));
7496 break;
7497 case 1:
7498 gen_aa32_st16(tmp, addr, get_mem_index(s));
7499 break;
7500 case 2:
7501 case 3:
7502 gen_aa32_st32(tmp, addr, get_mem_index(s));
7503 break;
7504 default:
7505 abort();
7507 tcg_temp_free_i32(tmp);
7508 if (size == 3) {
7509 tcg_gen_addi_i32(addr, addr, 4);
7510 tmp = load_reg(s, rt2);
7511 gen_aa32_st32(tmp, addr, get_mem_index(s));
7512 tcg_temp_free_i32(tmp);
7514 tcg_gen_movi_i32(cpu_R[rd], 0);
7515 tcg_gen_br(done_label);
7516 gen_set_label(fail_label);
7517 tcg_gen_movi_i32(cpu_R[rd], 1);
7518 gen_set_label(done_label);
7519 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
7521 #endif
7523 /* gen_srs:
7524 * @env: CPUARMState
7525 * @s: DisasContext
7526 * @mode: mode field from insn (which stack to store to)
7527 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7528 * @writeback: true if writeback bit set
7530 * Generate code for the SRS (Store Return State) insn.
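 *
 * The first offset switch below selects the address of the lowest
 * word written (SP-4 for DA, SP for IA, SP-8 for DB, SP+4 for IB);
 * the writeback switch then leaves SP at base-8 for the
 * decrementing modes and base+8 for the incrementing ones.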
7532 static void gen_srs(DisasContext *s,
7533 uint32_t mode, uint32_t amode, bool writeback)
7535 int32_t offset;
7536 TCGv_i32 addr = tcg_temp_new_i32();
7537 TCGv_i32 tmp = tcg_const_i32(mode);
7538 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7539 tcg_temp_free_i32(tmp);
7540 switch (amode) {
7541 case 0: /* DA */
7542 offset = -4;
7543 break;
7544 case 1: /* IA */
7545 offset = 0;
7546 break;
7547 case 2: /* DB */
7548 offset = -8;
7549 break;
7550 case 3: /* IB */
7551 offset = 4;
7552 break;
7553 default:
7554 abort();
7556 tcg_gen_addi_i32(addr, addr, offset);
7557 tmp = load_reg(s, 14);
7558 gen_aa32_st32(tmp, addr, get_mem_index(s));
7559 tcg_temp_free_i32(tmp);
7560 tmp = load_cpu_field(spsr);
7561 tcg_gen_addi_i32(addr, addr, 4);
7562 gen_aa32_st32(tmp, addr, get_mem_index(s));
7563 tcg_temp_free_i32(tmp);
7564 if (writeback) {
7565 switch (amode) {
7566 case 0:
7567 offset = -8;
7568 break;
7569 case 1:
7570 offset = 4;
7571 break;
7572 case 2:
7573 offset = -4;
7574 break;
7575 case 3:
7576 offset = 0;
7577 break;
7578 default:
7579 abort();
7581 tcg_gen_addi_i32(addr, addr, offset);
7582 tmp = tcg_const_i32(mode);
7583 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7584 tcg_temp_free_i32(tmp);
7586 tcg_temp_free_i32(addr);
7589 static void disas_arm_insn(DisasContext *s, unsigned int insn)
7591 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
7592 TCGv_i32 tmp;
7593 TCGv_i32 tmp2;
7594 TCGv_i32 tmp3;
7595 TCGv_i32 addr;
7596 TCGv_i64 tmp64;
7598 /* M variants do not implement ARM mode. */
7599 if (arm_dc_feature(s, ARM_FEATURE_M)) {
7600 goto illegal_op;
7602 cond = insn >> 28;
7603 if (cond == 0xf){
7604 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7605 * choose to UNDEF. In ARMv5 and above the space is used
7606 * for miscellaneous unconditional instructions.
7608 ARCH(5);
7610 /* Unconditional instructions. */
7611 if (((insn >> 25) & 7) == 1) {
7612 /* NEON Data processing. */
7613 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7614 goto illegal_op;
7617 if (disas_neon_data_insn(s, insn)) {
7618 goto illegal_op;
7620 return;
7622 if ((insn & 0x0f100000) == 0x04000000) {
7623 /* NEON load/store. */
7624 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
7625 goto illegal_op;
7628 if (disas_neon_ls_insn(s, insn)) {
7629 goto illegal_op;
7631 return;
7633 if ((insn & 0x0f000e10) == 0x0e000a00) {
7634 /* VFP. */
7635 if (disas_vfp_insn(s, insn)) {
7636 goto illegal_op;
7638 return;
7640 if (((insn & 0x0f30f000) == 0x0510f000) ||
7641 ((insn & 0x0f30f010) == 0x0710f000)) {
7642 if ((insn & (1 << 22)) == 0) {
7643 /* PLDW; v7MP */
7644 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7645 goto illegal_op;
7648 /* Otherwise PLD; v5TE+ */
7649 ARCH(5TE);
7650 return;
7652 if (((insn & 0x0f70f000) == 0x0450f000) ||
7653 ((insn & 0x0f70f010) == 0x0650f000)) {
7654 ARCH(7);
7655 return; /* PLI; V7 */
7657 if (((insn & 0x0f700000) == 0x04100000) ||
7658 ((insn & 0x0f700010) == 0x06100000)) {
7659 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
7660 goto illegal_op;
7662 return; /* v7MP: Unallocated memory hint: must NOP */
7665 if ((insn & 0x0ffffdff) == 0x01010000) {
7666 ARCH(6);
7667 /* setend */
7668 if (((insn >> 9) & 1) != s->bswap_code) {
7669 /* Dynamic endianness switching not implemented. */
7670 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
7671 goto illegal_op;
7673 return;
7674 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7675 switch ((insn >> 4) & 0xf) {
7676 case 1: /* clrex */
7677 ARCH(6K);
7678 gen_clrex(s);
7679 return;
7680 case 4: /* dsb */
7681 case 5: /* dmb */
7682 case 6: /* isb */
7683 ARCH(7);
7684 /* We don't emulate caches so these are a no-op. */
7685 return;
7686 default:
7687 goto illegal_op;
7689 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7690 /* srs */
7691 if (IS_USER(s)) {
7692 goto illegal_op;
7694 ARCH(6);
7695 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
7696 return;
7697 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
7698 /* rfe */
7699 int32_t offset;
7700 if (IS_USER(s))
7701 goto illegal_op;
7702 ARCH(6);
7703 rn = (insn >> 16) & 0xf;
7704 addr = load_reg(s, rn);
7705 i = (insn >> 23) & 3;
7706 switch (i) {
7707 case 0: offset = -4; break; /* DA */
7708 case 1: offset = 0; break; /* IA */
7709 case 2: offset = -8; break; /* DB */
7710 case 3: offset = 4; break; /* IB */
7711 default: abort();
7713 if (offset)
7714 tcg_gen_addi_i32(addr, addr, offset);
7715 /* Load PC into tmp and CPSR into tmp2. */
7716 tmp = tcg_temp_new_i32();
7717 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7718 tcg_gen_addi_i32(addr, addr, 4);
7719 tmp2 = tcg_temp_new_i32();
7720 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
7721 if (insn & (1 << 21)) {
7722 /* Base writeback. */
7723 switch (i) {
7724 case 0: offset = -8; break;
7725 case 1: offset = 4; break;
7726 case 2: offset = -4; break;
7727 case 3: offset = 0; break;
7728 default: abort();
7730 if (offset)
7731 tcg_gen_addi_i32(addr, addr, offset);
7732 store_reg(s, rn, addr);
7733 } else {
7734 tcg_temp_free_i32(addr);
7736 gen_rfe(s, tmp, tmp2);
7737 return;
7738 } else if ((insn & 0x0e000000) == 0x0a000000) {
7739 /* branch link and change to thumb (blx <offset>) */
7740 int32_t offset;
7742 val = (uint32_t)s->pc;
7743 tmp = tcg_temp_new_i32();
7744 tcg_gen_movi_i32(tmp, val);
7745 store_reg(s, 14, tmp);
7746 /* Sign-extend the 24-bit offset */
7747 offset = (((int32_t)insn) << 8) >> 8;
7748 /* offset * 4 + bit24 * 2 + (thumb bit) */
7749 val += (offset << 2) | ((insn >> 23) & 2) | 1;
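/* Bit 24 (the H bit) supplies the extra halfword of offset;
 * setting bit 0 here is what makes gen_bx_im switch to Thumb.
 */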
7750 /* pipeline offset */
7751 val += 4;
7752 /* protected by ARCH(5) above, near the start of the uncond block */
7753 gen_bx_im(s, val);
7754 return;
7755 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7756 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7757 /* iWMMXt register transfer. */
7758 if (extract32(s->c15_cpar, 1, 1)) {
7759 if (!disas_iwmmxt_insn(s, insn)) {
7760 return;
7764 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7765 /* Coprocessor double register transfer. */
7766 ARCH(5TE);
7767 } else if ((insn & 0x0f000010) == 0x0e000010) {
7768 /* Additional coprocessor register transfer. */
7769 } else if ((insn & 0x0ff10020) == 0x01000000) {
7770 uint32_t mask;
7771 uint32_t val;
7772 /* cps (privileged) */
7773 if (IS_USER(s))
7774 return;
7775 mask = val = 0;
7776 if (insn & (1 << 19)) {
7777 if (insn & (1 << 8))
7778 mask |= CPSR_A;
7779 if (insn & (1 << 7))
7780 mask |= CPSR_I;
7781 if (insn & (1 << 6))
7782 mask |= CPSR_F;
7783 if (insn & (1 << 18))
7784 val |= mask;
7786 if (insn & (1 << 17)) {
7787 mask |= CPSR_M;
7788 val |= (insn & 0x1f);
7790 if (mask) {
7791 gen_set_psr_im(s, mask, 0, val);
7793 return;
7795 goto illegal_op;
7797 if (cond != 0xe) {
7798 /* if the condition is not "always", generate a conditional
7799 jump to the next instruction */
7800 s->condlabel = gen_new_label();
7801 arm_gen_test_cc(cond ^ 1, s->condlabel);
7802 s->condjmp = 1;
7804 if ((insn & 0x0f900000) == 0x03000000) {
7805 if ((insn & (1 << 21)) == 0) {
7806 ARCH(6T2);
7807 rd = (insn >> 12) & 0xf;
7808 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7809 if ((insn & (1 << 22)) == 0) {
7810 /* MOVW */
7811 tmp = tcg_temp_new_i32();
7812 tcg_gen_movi_i32(tmp, val);
7813 } else {
7814 /* MOVT */
7815 tmp = load_reg(s, rd);
7816 tcg_gen_ext16u_i32(tmp, tmp);
7817 tcg_gen_ori_i32(tmp, tmp, val << 16);
7819 store_reg(s, rd, tmp);
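/* E.g. "movw r0, #0x5678" followed by "movt r0, #0x1234" leaves
 * r0 = 0x12345678; MOVT preserves the low halfword via the
 * ext16u above.
 */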
7820 } else {
7821 if (((insn >> 12) & 0xf) != 0xf)
7822 goto illegal_op;
7823 if (((insn >> 16) & 0xf) == 0) {
7824 gen_nop_hint(s, insn & 0xff);
7825 } else {
7826 /* CPSR = immediate */
7827 val = insn & 0xff;
7828 shift = ((insn >> 8) & 0xf) * 2;
7829 if (shift)
7830 val = (val >> shift) | (val << (32 - shift));
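/* Standard ARM rotated immediate: imm8 rotated right by twice
 * the rotate field, e.g. imm8 = 0xff with a rotate field of 2
 * yields 0xf000000f.
 */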
7831 i = ((insn & (1 << 22)) != 0);
7832 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7833 i, val)) {
7834 goto illegal_op;
7838 } else if ((insn & 0x0f900000) == 0x01000000
7839 && (insn & 0x00000090) != 0x00000090) {
7840 /* miscellaneous instructions */
7841 op1 = (insn >> 21) & 3;
7842 sh = (insn >> 4) & 0xf;
7843 rm = insn & 0xf;
7844 switch (sh) {
7845 case 0x0: /* move program status register */
7846 if (op1 & 1) {
7847 /* PSR = reg */
7848 tmp = load_reg(s, rm);
7849 i = ((op1 & 2) != 0);
7850 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
7851 goto illegal_op;
7852 } else {
7853 /* reg = PSR */
7854 rd = (insn >> 12) & 0xf;
7855 if (op1 & 2) {
7856 if (IS_USER(s))
7857 goto illegal_op;
7858 tmp = load_cpu_field(spsr);
7859 } else {
7860 tmp = tcg_temp_new_i32();
7861 gen_helper_cpsr_read(tmp, cpu_env);
7863 store_reg(s, rd, tmp);
7865 break;
7866 case 0x1:
7867 if (op1 == 1) {
7868 /* branch/exchange thumb (bx). */
7869 ARCH(4T);
7870 tmp = load_reg(s, rm);
7871 gen_bx(s, tmp);
7872 } else if (op1 == 3) {
7873 /* clz */
7874 ARCH(5);
7875 rd = (insn >> 12) & 0xf;
7876 tmp = load_reg(s, rm);
7877 gen_helper_clz(tmp, tmp);
7878 store_reg(s, rd, tmp);
7879 } else {
7880 goto illegal_op;
7882 break;
7883 case 0x2:
7884 if (op1 == 1) {
7885 ARCH(5J); /* bxj */
7886 /* Trivial implementation equivalent to bx. */
7887 tmp = load_reg(s, rm);
7888 gen_bx(s, tmp);
7889 } else {
7890 goto illegal_op;
7892 break;
7893 case 0x3:
7894 if (op1 != 1)
7895 goto illegal_op;
7897 ARCH(5);
7898 /* branch link/exchange thumb (blx) */
7899 tmp = load_reg(s, rm);
7900 tmp2 = tcg_temp_new_i32();
7901 tcg_gen_movi_i32(tmp2, s->pc);
7902 store_reg(s, 14, tmp2);
7903 gen_bx(s, tmp);
7904 break;
7905 case 0x4:
7907 /* crc32/crc32c */
7908 uint32_t c = extract32(insn, 8, 4);
7910 /* Check that this CPU supports ARMv8 CRC instructions.
7911 * op1 == 3 is UNPREDICTABLE but we handle it as UNDEFINED.
7912 * Bits 8, 10 and 11 should be zero.
7914 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
7915 (c & 0xd) != 0) {
7916 goto illegal_op;
7919 rn = extract32(insn, 16, 4);
7920 rd = extract32(insn, 12, 4);
7922 tmp = load_reg(s, rn);
7923 tmp2 = load_reg(s, rm);
7924 if (op1 == 0) {
7925 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7926 } else if (op1 == 1) {
7927 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7929 tmp3 = tcg_const_i32(1 << op1);
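/* 1 << op1 is the operand size in bytes: 1 (CRC32B/CB),
 * 2 (CRC32H/CH) or 4 (CRC32W/CW).
 */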
7930 if (c & 0x2) {
7931 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7932 } else {
7933 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7935 tcg_temp_free_i32(tmp2);
7936 tcg_temp_free_i32(tmp3);
7937 store_reg(s, rd, tmp);
7938 break;
7940 case 0x5: /* saturating add/subtract */
7941 ARCH(5TE);
7942 rd = (insn >> 12) & 0xf;
7943 rn = (insn >> 16) & 0xf;
7944 tmp = load_reg(s, rm);
7945 tmp2 = load_reg(s, rn);
7946 if (op1 & 2)
7947 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
7948 if (op1 & 1)
7949 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
7950 else
7951 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7952 tcg_temp_free_i32(tmp2);
7953 store_reg(s, rd, tmp);
7954 break;
7955 case 7:
7957 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
7958 switch (op1) {
7959 case 1:
7960 /* bkpt */
7961 ARCH(5);
7962 gen_exception_insn(s, 4, EXCP_BKPT,
7963 syn_aa32_bkpt(imm16, false));
7964 break;
7965 case 2:
7966 /* Hypervisor call (v7) */
7967 ARCH(7);
7968 if (IS_USER(s)) {
7969 goto illegal_op;
7971 gen_hvc(s, imm16);
7972 break;
7973 case 3:
7974 /* Secure monitor call (v6+) */
7975 ARCH(6K);
7976 if (IS_USER(s)) {
7977 goto illegal_op;
7979 gen_smc(s);
7980 break;
7981 default:
7982 goto illegal_op;
7984 break;
7986 case 0x8: /* signed multiply */
7987 case 0xa:
7988 case 0xc:
7989 case 0xe:
7990 ARCH(5TE);
7991 rs = (insn >> 8) & 0xf;
7992 rn = (insn >> 12) & 0xf;
7993 rd = (insn >> 16) & 0xf;
7994 if (op1 == 1) {
7995 /* (32 * 16) >> 16 */
7996 tmp = load_reg(s, rm);
7997 tmp2 = load_reg(s, rs);
7998 if (sh & 4)
7999 tcg_gen_sari_i32(tmp2, tmp2, 16);
8000 else
8001 gen_sxth(tmp2);
8002 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8003 tcg_gen_shri_i64(tmp64, tmp64, 16);
8004 tmp = tcg_temp_new_i32();
8005 tcg_gen_trunc_i64_i32(tmp, tmp64);
8006 tcg_temp_free_i64(tmp64);
8007 if ((sh & 2) == 0) {
8008 tmp2 = load_reg(s, rn);
8009 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8010 tcg_temp_free_i32(tmp2);
8012 store_reg(s, rd, tmp);
8013 } else {
8014 /* 16 * 16 */
8015 tmp = load_reg(s, rm);
8016 tmp2 = load_reg(s, rs);
8017 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
8018 tcg_temp_free_i32(tmp2);
8019 if (op1 == 2) {
8020 tmp64 = tcg_temp_new_i64();
8021 tcg_gen_ext_i32_i64(tmp64, tmp);
8022 tcg_temp_free_i32(tmp);
8023 gen_addq(s, tmp64, rn, rd);
8024 gen_storeq_reg(s, rn, rd, tmp64);
8025 tcg_temp_free_i64(tmp64);
8026 } else {
8027 if (op1 == 0) {
8028 tmp2 = load_reg(s, rn);
8029 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8030 tcg_temp_free_i32(tmp2);
8032 store_reg(s, rd, tmp);
8035 break;
8036 default:
8037 goto illegal_op;
8039 } else if (((insn & 0x0e000000) == 0 &&
8040 (insn & 0x00000090) != 0x90) ||
8041 ((insn & 0x0e000000) == (1 << 25))) {
8042 int set_cc, logic_cc, shiftop;
8044 op1 = (insn >> 21) & 0xf;
8045 set_cc = (insn >> 20) & 1;
8046 logic_cc = table_logic_cc[op1] & set_cc;
8048 /* data processing instruction */
8049 if (insn & (1 << 25)) {
8050 /* immediate operand */
8051 val = insn & 0xff;
8052 shift = ((insn >> 8) & 0xf) * 2;
8053 if (shift) {
8054 val = (val >> shift) | (val << (32 - shift));
8056 tmp2 = tcg_temp_new_i32();
8057 tcg_gen_movi_i32(tmp2, val);
8058 if (logic_cc && shift) {
8059 gen_set_CF_bit31(tmp2);
8061 } else {
8062 /* register */
8063 rm = (insn) & 0xf;
8064 tmp2 = load_reg(s, rm);
8065 shiftop = (insn >> 5) & 3;
8066 if (!(insn & (1 << 4))) {
8067 shift = (insn >> 7) & 0x1f;
8068 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8069 } else {
8070 rs = (insn >> 8) & 0xf;
8071 tmp = load_reg(s, rs);
8072 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
8075 if (op1 != 0x0f && op1 != 0x0d) {
8076 rn = (insn >> 16) & 0xf;
8077 tmp = load_reg(s, rn);
8078 } else {
8079 TCGV_UNUSED_I32(tmp);
8081 rd = (insn >> 12) & 0xf;
8082 switch(op1) {
8083 case 0x00:
8084 tcg_gen_and_i32(tmp, tmp, tmp2);
8085 if (logic_cc) {
8086 gen_logic_CC(tmp);
8088 store_reg_bx(s, rd, tmp);
8089 break;
8090 case 0x01:
8091 tcg_gen_xor_i32(tmp, tmp, tmp2);
8092 if (logic_cc) {
8093 gen_logic_CC(tmp);
8095 store_reg_bx(s, rd, tmp);
8096 break;
8097 case 0x02:
8098 if (set_cc && rd == 15) {
8099 /* SUBS r15, ... is used for exception return. */
8100 if (IS_USER(s)) {
8101 goto illegal_op;
8103 gen_sub_CC(tmp, tmp, tmp2);
8104 gen_exception_return(s, tmp);
8105 } else {
8106 if (set_cc) {
8107 gen_sub_CC(tmp, tmp, tmp2);
8108 } else {
8109 tcg_gen_sub_i32(tmp, tmp, tmp2);
8111 store_reg_bx(s, rd, tmp);
8113 break;
8114 case 0x03:
8115 if (set_cc) {
8116 gen_sub_CC(tmp, tmp2, tmp);
8117 } else {
8118 tcg_gen_sub_i32(tmp, tmp2, tmp);
8120 store_reg_bx(s, rd, tmp);
8121 break;
8122 case 0x04:
8123 if (set_cc) {
8124 gen_add_CC(tmp, tmp, tmp2);
8125 } else {
8126 tcg_gen_add_i32(tmp, tmp, tmp2);
8128 store_reg_bx(s, rd, tmp);
8129 break;
8130 case 0x05:
8131 if (set_cc) {
8132 gen_adc_CC(tmp, tmp, tmp2);
8133 } else {
8134 gen_add_carry(tmp, tmp, tmp2);
8136 store_reg_bx(s, rd, tmp);
8137 break;
8138 case 0x06:
8139 if (set_cc) {
8140 gen_sbc_CC(tmp, tmp, tmp2);
8141 } else {
8142 gen_sub_carry(tmp, tmp, tmp2);
8144 store_reg_bx(s, rd, tmp);
8145 break;
8146 case 0x07:
8147 if (set_cc) {
8148 gen_sbc_CC(tmp, tmp2, tmp);
8149 } else {
8150 gen_sub_carry(tmp, tmp2, tmp);
8152 store_reg_bx(s, rd, tmp);
8153 break;
8154 case 0x08:
8155 if (set_cc) {
8156 tcg_gen_and_i32(tmp, tmp, tmp2);
8157 gen_logic_CC(tmp);
8159 tcg_temp_free_i32(tmp);
8160 break;
8161 case 0x09:
8162 if (set_cc) {
8163 tcg_gen_xor_i32(tmp, tmp, tmp2);
8164 gen_logic_CC(tmp);
8166 tcg_temp_free_i32(tmp);
8167 break;
8168 case 0x0a:
8169 if (set_cc) {
8170 gen_sub_CC(tmp, tmp, tmp2);
8172 tcg_temp_free_i32(tmp);
8173 break;
8174 case 0x0b:
8175 if (set_cc) {
8176 gen_add_CC(tmp, tmp, tmp2);
8178 tcg_temp_free_i32(tmp);
8179 break;
8180 case 0x0c:
8181 tcg_gen_or_i32(tmp, tmp, tmp2);
8182 if (logic_cc) {
8183 gen_logic_CC(tmp);
8185 store_reg_bx(s, rd, tmp);
8186 break;
8187 case 0x0d:
8188 if (logic_cc && rd == 15) {
8189 /* MOVS r15, ... is used for exception return. */
8190 if (IS_USER(s)) {
8191 goto illegal_op;
8193 gen_exception_return(s, tmp2);
8194 } else {
8195 if (logic_cc) {
8196 gen_logic_CC(tmp2);
8198 store_reg_bx(s, rd, tmp2);
8200 break;
8201 case 0x0e:
8202 tcg_gen_andc_i32(tmp, tmp, tmp2);
8203 if (logic_cc) {
8204 gen_logic_CC(tmp);
8206 store_reg_bx(s, rd, tmp);
8207 break;
8208 default:
8209 case 0x0f:
8210 tcg_gen_not_i32(tmp2, tmp2);
8211 if (logic_cc) {
8212 gen_logic_CC(tmp2);
8214 store_reg_bx(s, rd, tmp2);
8215 break;
8217 if (op1 != 0x0f && op1 != 0x0d) {
8218 tcg_temp_free_i32(tmp2);
8220 } else {
8221 /* other instructions */
8222 op1 = (insn >> 24) & 0xf;
8223 switch(op1) {
8224 case 0x0:
8225 case 0x1:
8226 /* multiplies, extra load/stores */
8227 sh = (insn >> 5) & 3;
8228 if (sh == 0) {
8229 if (op1 == 0x0) {
8230 rd = (insn >> 16) & 0xf;
8231 rn = (insn >> 12) & 0xf;
8232 rs = (insn >> 8) & 0xf;
8233 rm = (insn) & 0xf;
8234 op1 = (insn >> 20) & 0xf;
8235 switch (op1) {
8236 case 0: case 1: case 2: case 3: case 6:
8237 /* 32 bit mul */
8238 tmp = load_reg(s, rs);
8239 tmp2 = load_reg(s, rm);
8240 tcg_gen_mul_i32(tmp, tmp, tmp2);
8241 tcg_temp_free_i32(tmp2);
8242 if (insn & (1 << 22)) {
8243 /* Subtract (mls) */
8244 ARCH(6T2);
8245 tmp2 = load_reg(s, rn);
8246 tcg_gen_sub_i32(tmp, tmp2, tmp);
8247 tcg_temp_free_i32(tmp2);
8248 } else if (insn & (1 << 21)) {
8249 /* Add */
8250 tmp2 = load_reg(s, rn);
8251 tcg_gen_add_i32(tmp, tmp, tmp2);
8252 tcg_temp_free_i32(tmp2);
8254 if (insn & (1 << 20))
8255 gen_logic_CC(tmp);
8256 store_reg(s, rd, tmp);
8257 break;
8258 case 4:
8259 /* 64 bit mul double accumulate (UMAAL) */
8260 ARCH(6);
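/* UMAAL: {rd:rn} = rs * rm + rn + rd, all unsigned; the two
 * 32-bit addends cannot overflow the 64-bit product range.
 */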
8261 tmp = load_reg(s, rs);
8262 tmp2 = load_reg(s, rm);
8263 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8264 gen_addq_lo(s, tmp64, rn);
8265 gen_addq_lo(s, tmp64, rd);
8266 gen_storeq_reg(s, rn, rd, tmp64);
8267 tcg_temp_free_i64(tmp64);
8268 break;
8269 case 8: case 9: case 10: case 11:
8270 case 12: case 13: case 14: case 15:
8271 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
8272 tmp = load_reg(s, rs);
8273 tmp2 = load_reg(s, rm);
8274 if (insn & (1 << 22)) {
8275 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8276 } else {
8277 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8279 if (insn & (1 << 21)) { /* mult accumulate */
8280 TCGv_i32 al = load_reg(s, rn);
8281 TCGv_i32 ah = load_reg(s, rd);
8282 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
8283 tcg_temp_free_i32(al);
8284 tcg_temp_free_i32(ah);
8286 if (insn & (1 << 20)) {
8287 gen_logicq_cc(tmp, tmp2);
8289 store_reg(s, rn, tmp);
8290 store_reg(s, rd, tmp2);
8291 break;
8292 default:
8293 goto illegal_op;
8295 } else {
8296 rn = (insn >> 16) & 0xf;
8297 rd = (insn >> 12) & 0xf;
8298 if (insn & (1 << 23)) {
8299 /* load/store exclusive */
8300 int op2 = (insn >> 8) & 3;
8301 op1 = (insn >> 21) & 0x3;
8303 switch (op2) {
8304 case 0: /* lda/stl */
8305 if (op1 == 1) {
8306 goto illegal_op;
8308 ARCH(8);
8309 break;
8310 case 1: /* reserved */
8311 goto illegal_op;
8312 case 2: /* ldaex/stlex */
8313 ARCH(8);
8314 break;
8315 case 3: /* ldrex/strex */
8316 if (op1) {
8317 ARCH(6K);
8318 } else {
8319 ARCH(6);
8321 break;
8324 addr = tcg_temp_local_new_i32();
8325 load_reg_var(s, addr, rn);
8327 /* Since the emulation does not have barriers,
8328 the acquire/release semantics need no special
8329 handling */
8330 if (op2 == 0) {
8331 if (insn & (1 << 20)) {
8332 tmp = tcg_temp_new_i32();
8333 switch (op1) {
8334 case 0: /* lda */
8335 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8336 break;
8337 case 2: /* ldab */
8338 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8339 break;
8340 case 3: /* ldah */
8341 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8342 break;
8343 default:
8344 abort();
8346 store_reg(s, rd, tmp);
8347 } else {
8348 rm = insn & 0xf;
8349 tmp = load_reg(s, rm);
8350 switch (op1) {
8351 case 0: /* stl */
8352 gen_aa32_st32(tmp, addr, get_mem_index(s));
8353 break;
8354 case 2: /* stlb */
8355 gen_aa32_st8(tmp, addr, get_mem_index(s));
8356 break;
8357 case 3: /* stlh */
8358 gen_aa32_st16(tmp, addr, get_mem_index(s));
8359 break;
8360 default:
8361 abort();
8363 tcg_temp_free_i32(tmp);
8365 } else if (insn & (1 << 20)) {
8366 switch (op1) {
8367 case 0: /* ldrex */
8368 gen_load_exclusive(s, rd, 15, addr, 2);
8369 break;
8370 case 1: /* ldrexd */
8371 gen_load_exclusive(s, rd, rd + 1, addr, 3);
8372 break;
8373 case 2: /* ldrexb */
8374 gen_load_exclusive(s, rd, 15, addr, 0);
8375 break;
8376 case 3: /* ldrexh */
8377 gen_load_exclusive(s, rd, 15, addr, 1);
8378 break;
8379 default:
8380 abort();
8382 } else {
8383 rm = insn & 0xf;
8384 switch (op1) {
8385 case 0: /* strex */
8386 gen_store_exclusive(s, rd, rm, 15, addr, 2);
8387 break;
8388 case 1: /* strexd */
8389 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
8390 break;
8391 case 2: /* strexb */
8392 gen_store_exclusive(s, rd, rm, 15, addr, 0);
8393 break;
8394 case 3: /* strexh */
8395 gen_store_exclusive(s, rd, rm, 15, addr, 1);
8396 break;
8397 default:
8398 abort();
8401 tcg_temp_free_i32(addr);
8402 } else {
8403 /* SWP instruction */
8404 rm = (insn) & 0xf;
8406 /* ??? This is not really atomic. However we know
8407 we never have multiple CPUs running in parallel,
8408 so it is good enough. */
8409 addr = load_reg(s, rn);
8410 tmp = load_reg(s, rm);
8411 tmp2 = tcg_temp_new_i32();
8412 if (insn & (1 << 22)) {
8413 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8414 gen_aa32_st8(tmp, addr, get_mem_index(s));
8415 } else {
8416 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8417 gen_aa32_st32(tmp, addr, get_mem_index(s));
8419 tcg_temp_free_i32(tmp);
8420 tcg_temp_free_i32(addr);
8421 store_reg(s, rd, tmp2);
8424 } else {
8425 int address_offset;
8426 int load;
8427 /* Misc load/store */
8428 rn = (insn >> 16) & 0xf;
8429 rd = (insn >> 12) & 0xf;
8430 addr = load_reg(s, rn);
8431 if (insn & (1 << 24))
8432 gen_add_datah_offset(s, insn, 0, addr);
8433 address_offset = 0;
8434 if (insn & (1 << 20)) {
8435 /* load */
8436 tmp = tcg_temp_new_i32();
8437 switch(sh) {
8438 case 1:
8439 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8440 break;
8441 case 2:
8442 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8443 break;
8444 default:
8445 case 3:
8446 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8447 break;
8449 load = 1;
8450 } else if (sh & 2) {
8451 ARCH(5TE);
8452 /* doubleword */
8453 if (sh & 1) {
8454 /* store */
8455 tmp = load_reg(s, rd);
8456 gen_aa32_st32(tmp, addr, get_mem_index(s));
8457 tcg_temp_free_i32(tmp);
8458 tcg_gen_addi_i32(addr, addr, 4);
8459 tmp = load_reg(s, rd + 1);
8460 gen_aa32_st32(tmp, addr, get_mem_index(s));
8461 tcg_temp_free_i32(tmp);
8462 load = 0;
8463 } else {
8464 /* load */
8465 tmp = tcg_temp_new_i32();
8466 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8467 store_reg(s, rd, tmp);
8468 tcg_gen_addi_i32(addr, addr, 4);
8469 tmp = tcg_temp_new_i32();
8470 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8471 rd++;
8472 load = 1;
8474 address_offset = -4;
8475 } else {
8476 /* store */
8477 tmp = load_reg(s, rd);
8478 gen_aa32_st16(tmp, addr, get_mem_index(s));
8479 tcg_temp_free_i32(tmp);
8480 load = 0;
8482 /* Perform base writeback before the loaded value to
8483 ensure correct behavior with overlapping index registers.
8484 ldrd with base writeback is undefined if the
8485 destination and index registers overlap. */
8486 if (!(insn & (1 << 24))) {
8487 gen_add_datah_offset(s, insn, address_offset, addr);
8488 store_reg(s, rn, addr);
8489 } else if (insn & (1 << 21)) {
8490 if (address_offset)
8491 tcg_gen_addi_i32(addr, addr, address_offset);
8492 store_reg(s, rn, addr);
8493 } else {
8494 tcg_temp_free_i32(addr);
8496 if (load) {
8497 /* Complete the load. */
8498 store_reg(s, rd, tmp);
8501 break;
8502 case 0x4:
8503 case 0x5:
8504 goto do_ldst;
8505 case 0x6:
8506 case 0x7:
8507 if (insn & (1 << 4)) {
8508 ARCH(6);
8509 /* ARMv6 media instructions. */
8510 rm = insn & 0xf;
8511 rn = (insn >> 16) & 0xf;
8512 rd = (insn >> 12) & 0xf;
8513 rs = (insn >> 8) & 0xf;
8514 switch ((insn >> 23) & 3) {
8515 case 0: /* Parallel add/subtract. */
8516 op1 = (insn >> 20) & 7;
8517 tmp = load_reg(s, rn);
8518 tmp2 = load_reg(s, rm);
8519 sh = (insn >> 5) & 7;
8520 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8521 goto illegal_op;
8522 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
8523 tcg_temp_free_i32(tmp2);
8524 store_reg(s, rd, tmp);
8525 break;
8526 case 1:
8527 if ((insn & 0x00700020) == 0) {
8528 /* Halfword pack. */
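/* PKHBT: Rd = Rn[15:0] | (Rm LSL shift)[31:16];
 * PKHTB: Rd = (Rm ASR shift)[15:0] | Rn[31:16].
 */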
8529 tmp = load_reg(s, rn);
8530 tmp2 = load_reg(s, rm);
8531 shift = (insn >> 7) & 0x1f;
8532 if (insn & (1 << 6)) {
8533 /* pkhtb */
8534 if (shift == 0)
8535 shift = 31;
8536 tcg_gen_sari_i32(tmp2, tmp2, shift);
8537 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8538 tcg_gen_ext16u_i32(tmp2, tmp2);
8539 } else {
8540 /* pkhbt */
8541 if (shift)
8542 tcg_gen_shli_i32(tmp2, tmp2, shift);
8543 tcg_gen_ext16u_i32(tmp, tmp);
8544 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8546 tcg_gen_or_i32(tmp, tmp, tmp2);
8547 tcg_temp_free_i32(tmp2);
8548 store_reg(s, rd, tmp);
8549 } else if ((insn & 0x00200020) == 0x00200000) {
8550 /* [us]sat */
8551 tmp = load_reg(s, rm);
8552 shift = (insn >> 7) & 0x1f;
8553 if (insn & (1 << 6)) {
8554 if (shift == 0)
8555 shift = 31;
8556 tcg_gen_sari_i32(tmp, tmp, shift);
8557 } else {
8558 tcg_gen_shli_i32(tmp, tmp, shift);
8560 sh = (insn >> 16) & 0x1f;
8561 tmp2 = tcg_const_i32(sh);
8562 if (insn & (1 << 22))
8563 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8564 else
8565 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8566 tcg_temp_free_i32(tmp2);
8567 store_reg(s, rd, tmp);
8568 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8569 /* [us]sat16 */
8570 tmp = load_reg(s, rm);
8571 sh = (insn >> 16) & 0x1f;
8572 tmp2 = tcg_const_i32(sh);
8573 if (insn & (1 << 22))
8574 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8575 else
8576 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8577 tcg_temp_free_i32(tmp2);
8578 store_reg(s, rd, tmp);
8579 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8580 /* Select bytes. */
8581 tmp = load_reg(s, rn);
8582 tmp2 = load_reg(s, rm);
8583 tmp3 = tcg_temp_new_i32();
8584 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8585 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8586 tcg_temp_free_i32(tmp3);
8587 tcg_temp_free_i32(tmp2);
8588 store_reg(s, rd, tmp);
8589 } else if ((insn & 0x000003e0) == 0x00000060) {
8590 tmp = load_reg(s, rm);
8591 shift = (insn >> 10) & 3;
8592 /* ??? In many cases it's not necessary to do a
8593 rotate, a shift is sufficient. */
8594 if (shift != 0)
8595 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8596 op1 = (insn >> 20) & 7;
8597 switch (op1) {
8598 case 0: gen_sxtb16(tmp); break;
8599 case 2: gen_sxtb(tmp); break;
8600 case 3: gen_sxth(tmp); break;
8601 case 4: gen_uxtb16(tmp); break;
8602 case 6: gen_uxtb(tmp); break;
8603 case 7: gen_uxth(tmp); break;
8604 default: goto illegal_op;
8606 if (rn != 15) {
8607 tmp2 = load_reg(s, rn);
8608 if ((op1 & 3) == 0) {
8609 gen_add16(tmp, tmp2);
8610 } else {
8611 tcg_gen_add_i32(tmp, tmp, tmp2);
8612 tcg_temp_free_i32(tmp2);
8615 store_reg(s, rd, tmp);
8616 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8617 /* rev */
8618 tmp = load_reg(s, rm);
8619 if (insn & (1 << 22)) {
8620 if (insn & (1 << 7)) {
8621 gen_revsh(tmp);
8622 } else {
8623 ARCH(6T2);
8624 gen_helper_rbit(tmp, tmp);
8626 } else {
8627 if (insn & (1 << 7))
8628 gen_rev16(tmp);
8629 else
8630 tcg_gen_bswap32_i32(tmp, tmp);
8632 store_reg(s, rd, tmp);
8633 } else {
8634 goto illegal_op;
8636 break;
8637 case 2: /* Multiplies (Type 3). */
8638 switch ((insn >> 20) & 0x7) {
8639 case 5:
8640 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8641 /* op2 not 00x or 11x : UNDEF */
8642 goto illegal_op;
8644 /* Signed multiply most significant [accumulate].
8645 (SMMUL, SMMLA, SMMLS) */
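/* Bit 5 selects the rounding variants (SMMULR/SMMLAR/SMMLSR):
 * adding 0x80000000 before the 32-bit shift below rounds the
 * 64-bit result to the nearest high word instead of truncating.
 */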
8646 tmp = load_reg(s, rm);
8647 tmp2 = load_reg(s, rs);
8648 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8650 if (rd != 15) {
8651 tmp = load_reg(s, rd);
8652 if (insn & (1 << 6)) {
8653 tmp64 = gen_subq_msw(tmp64, tmp);
8654 } else {
8655 tmp64 = gen_addq_msw(tmp64, tmp);
8658 if (insn & (1 << 5)) {
8659 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8661 tcg_gen_shri_i64(tmp64, tmp64, 32);
8662 tmp = tcg_temp_new_i32();
8663 tcg_gen_trunc_i64_i32(tmp, tmp64);
8664 tcg_temp_free_i64(tmp64);
8665 store_reg(s, rn, tmp);
8666 break;
8667 case 0:
8668 case 4:
8669 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8670 if (insn & (1 << 7)) {
8671 goto illegal_op;
8673 tmp = load_reg(s, rm);
8674 tmp2 = load_reg(s, rs);
8675 if (insn & (1 << 5))
8676 gen_swap_half(tmp2);
8677 gen_smul_dual(tmp, tmp2);
8678 if (insn & (1 << 22)) {
8679 /* smlald, smlsld */
8680 TCGv_i64 tmp64_2;
8682 tmp64 = tcg_temp_new_i64();
8683 tmp64_2 = tcg_temp_new_i64();
8684 tcg_gen_ext_i32_i64(tmp64, tmp);
8685 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
8686 tcg_temp_free_i32(tmp);
8687 tcg_temp_free_i32(tmp2);
8688 if (insn & (1 << 6)) {
8689 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8690 } else {
8691 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8693 tcg_temp_free_i64(tmp64_2);
8694 gen_addq(s, tmp64, rd, rn);
8695 gen_storeq_reg(s, rd, rn, tmp64);
8696 tcg_temp_free_i64(tmp64);
8697 } else {
8698 /* smuad, smusd, smlad, smlsd */
8699 if (insn & (1 << 6)) {
8700 /* This subtraction cannot overflow. */
8701 tcg_gen_sub_i32(tmp, tmp, tmp2);
8702 } else {
8703 /* This addition cannot overflow 32 bits;
8704 * however it may overflow considered as a
8705 * signed operation, in which case we must set
8706 * the Q flag.
8707 */
8708 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8710 tcg_temp_free_i32(tmp2);
8711 if (rd != 15)
8713 tmp2 = load_reg(s, rd);
8714 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8715 tcg_temp_free_i32(tmp2);
8717 store_reg(s, rn, tmp);
8719 break;
8720 case 1:
8721 case 3:
8722 /* SDIV, UDIV */
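/* Architecturally, ARM divide by zero (with no trap configured)
 * returns 0, and SDIV of INT_MIN by -1 wraps to INT_MIN; the
 * udiv/sdiv helpers below are assumed to implement those cases.
 */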
8723 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
8724 goto illegal_op;
8726 if (((insn >> 5) & 7) || (rd != 15)) {
8727 goto illegal_op;
8729 tmp = load_reg(s, rm);
8730 tmp2 = load_reg(s, rs);
8731 if (insn & (1 << 21)) {
8732 gen_helper_udiv(tmp, tmp, tmp2);
8733 } else {
8734 gen_helper_sdiv(tmp, tmp, tmp2);
8736 tcg_temp_free_i32(tmp2);
8737 store_reg(s, rn, tmp);
8738 break;
8739 default:
8740 goto illegal_op;
8742 break;
8743 case 3:
8744 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8745 switch (op1) {
8746 case 0: /* Unsigned sum of absolute differences. */
8747 ARCH(6);
8748 tmp = load_reg(s, rm);
8749 tmp2 = load_reg(s, rs);
8750 gen_helper_usad8(tmp, tmp, tmp2);
8751 tcg_temp_free_i32(tmp2);
8752 if (rd != 15) {
8753 tmp2 = load_reg(s, rd);
8754 tcg_gen_add_i32(tmp, tmp, tmp2);
8755 tcg_temp_free_i32(tmp2);
8757 store_reg(s, rn, tmp);
8758 break;
8759 case 0x20: case 0x24: case 0x28: case 0x2c:
8760 /* Bitfield insert/clear. */
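/* Here 'shift' is the field lsb and, after the adjustment below,
 * 'i' is the field width (msb + 1 - lsb); BFC is BFI with Rm == 15,
 * inserting zeros. E.g. lsb = 8, width = 4 computes
 * Rd = (Rd & ~0x00000f00) | ((Rm & 0xf) << 8) via the deposit op.
 */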
8761 ARCH(6T2);
8762 shift = (insn >> 7) & 0x1f;
8763 i = (insn >> 16) & 0x1f;
8764 if (i < shift) {
8765 /* UNPREDICTABLE; we choose to UNDEF */
8766 goto illegal_op;
8768 i = i + 1 - shift;
8769 if (rm == 15) {
8770 tmp = tcg_temp_new_i32();
8771 tcg_gen_movi_i32(tmp, 0);
8772 } else {
8773 tmp = load_reg(s, rm);
8775 if (i != 32) {
8776 tmp2 = load_reg(s, rd);
8777 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
8778 tcg_temp_free_i32(tmp2);
8780 store_reg(s, rd, tmp);
8781 break;
8782 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8783 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
8784 ARCH(6T2);
8785 tmp = load_reg(s, rm);
8786 shift = (insn >> 7) & 0x1f;
8787 i = ((insn >> 16) & 0x1f) + 1;
8788 if (shift + i > 32)
8789 goto illegal_op;
8790 if (i < 32) {
8791 if (op1 & 0x20) {
8792 gen_ubfx(tmp, shift, (1u << i) - 1);
8793 } else {
8794 gen_sbfx(tmp, shift, i);
8797 store_reg(s, rd, tmp);
8798 break;
8799 default:
8800 goto illegal_op;
8802 break;
8804 break;
8806 do_ldst:
8807 /* Check for undefined extension instructions
8808 * per the ARM Bible, i.e.:
8809 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8810 */
8811 sh = (0xf << 20) | (0xf << 4);
8812 if (op1 == 0x7 && ((insn & sh) == sh))
8814 goto illegal_op;
8816 /* load/store byte/word */
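/* Decode used below: bit 20 = L (load), bit 22 = B (byte),
 * bit 24 = P (pre-index), bit 21 = W (writeback). P == 0 && W == 1
 * is the unprivileged LDRT/STRT form, which is why that combination
 * selects the user-mode memory index.
 */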
8817 rn = (insn >> 16) & 0xf;
8818 rd = (insn >> 12) & 0xf;
8819 tmp2 = load_reg(s, rn);
8820 if ((insn & 0x01200000) == 0x00200000) {
8821 /* ldrt/strt */
8822 i = get_a32_user_mem_index(s);
8823 } else {
8824 i = get_mem_index(s);
8826 if (insn & (1 << 24))
8827 gen_add_data_offset(s, insn, tmp2);
8828 if (insn & (1 << 20)) {
8829 /* load */
8830 tmp = tcg_temp_new_i32();
8831 if (insn & (1 << 22)) {
8832 gen_aa32_ld8u(tmp, tmp2, i);
8833 } else {
8834 gen_aa32_ld32u(tmp, tmp2, i);
8836 } else {
8837 /* store */
8838 tmp = load_reg(s, rd);
8839 if (insn & (1 << 22)) {
8840 gen_aa32_st8(tmp, tmp2, i);
8841 } else {
8842 gen_aa32_st32(tmp, tmp2, i);
8844 tcg_temp_free_i32(tmp);
8846 if (!(insn & (1 << 24))) {
8847 gen_add_data_offset(s, insn, tmp2);
8848 store_reg(s, rn, tmp2);
8849 } else if (insn & (1 << 21)) {
8850 store_reg(s, rn, tmp2);
8851 } else {
8852 tcg_temp_free_i32(tmp2);
8854 if (insn & (1 << 20)) {
8855 /* Complete the load. */
8856 store_reg_from_load(s, rd, tmp);
8858 break;
8859 case 0x08:
8860 case 0x09:
8862 int j, n, loaded_base;
8863 bool exc_return = false;
8864 bool is_load = extract32(insn, 20, 1);
8865 bool user = false;
8866 TCGv_i32 loaded_var;
8867 /* load/store multiple words */
8868 /* XXX: store correct base if write back */
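/* All four LDM/STM addressing modes are normalised to ascending
 * transfers: the base is pre-biased by +4 (increment-before),
 * 0 (increment-after), -n*4 (decrement-before) or -(n-1)*4
 * (decrement-after), so the transfer loop always steps up by 4.
 */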
8869 if (insn & (1 << 22)) {
8870 /* LDM (user), LDM (exception return) and STM (user) */
8871 if (IS_USER(s))
8872 goto illegal_op; /* only usable in supervisor mode */
8874 if (is_load && extract32(insn, 15, 1)) {
8875 exc_return = true;
8876 } else {
8877 user = true;
8880 rn = (insn >> 16) & 0xf;
8881 addr = load_reg(s, rn);
8883 /* compute total size */
8884 loaded_base = 0;
8885 TCGV_UNUSED_I32(loaded_var);
8886 n = 0;
8887 for (i = 0; i < 16; i++) {
8888 if (insn & (1 << i))
8889 n++;
8890 }
8891 /* XXX: test invalid n == 0 case ? */
8892 if (insn & (1 << 23)) {
8893 if (insn & (1 << 24)) {
8894 /* pre increment */
8895 tcg_gen_addi_i32(addr, addr, 4);
8896 } else {
8897 /* post increment */
8898 }
8899 } else {
8900 if (insn & (1 << 24)) {
8901 /* pre decrement */
8902 tcg_gen_addi_i32(addr, addr, -(n * 4));
8903 } else {
8904 /* post decrement */
8905 if (n != 1)
8906 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8907 }
8908 }
8909 j = 0;
8910 for (i = 0; i < 16; i++) {
8911 if (insn & (1 << i)) {
8912 if (is_load) {
8913 /* load */
8914 tmp = tcg_temp_new_i32();
8915 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8916 if (user) {
8917 tmp2 = tcg_const_i32(i);
8918 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
8919 tcg_temp_free_i32(tmp2);
8920 tcg_temp_free_i32(tmp);
8921 } else if (i == rn) {
8922 loaded_var = tmp;
8923 loaded_base = 1;
8924 } else {
8925 store_reg_from_load(s, i, tmp);
8927 } else {
8928 /* store */
8929 if (i == 15) {
8930 /* special case: r15 = PC + 8 */
8931 val = (long)s->pc + 4;
8932 tmp = tcg_temp_new_i32();
8933 tcg_gen_movi_i32(tmp, val);
8934 } else if (user) {
8935 tmp = tcg_temp_new_i32();
8936 tmp2 = tcg_const_i32(i);
8937 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
8938 tcg_temp_free_i32(tmp2);
8939 } else {
8940 tmp = load_reg(s, i);
8942 gen_aa32_st32(tmp, addr, get_mem_index(s));
8943 tcg_temp_free_i32(tmp);
8945 j++;
8946 /* no need to add after the last transfer */
8947 if (j != n)
8948 tcg_gen_addi_i32(addr, addr, 4);
8951 if (insn & (1 << 21)) {
8952 /* write back */
8953 if (insn & (1 << 23)) {
8954 if (insn & (1 << 24)) {
8955 /* pre increment */
8956 } else {
8957 /* post increment */
8958 tcg_gen_addi_i32(addr, addr, 4);
8960 } else {
8961 if (insn & (1 << 24)) {
8962 /* pre decrement */
8963 if (n != 1)
8964 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
8965 } else {
8966 /* post decrement */
8967 tcg_gen_addi_i32(addr, addr, -(n * 4));
8970 store_reg(s, rn, addr);
8971 } else {
8972 tcg_temp_free_i32(addr);
8974 if (loaded_base) {
8975 store_reg(s, rn, loaded_var);
8977 if (exc_return) {
8978 /* Restore CPSR from SPSR. */
8979 tmp = load_cpu_field(spsr);
8980 gen_set_cpsr(tmp, CPSR_ERET_MASK);
8981 tcg_temp_free_i32(tmp);
8982 s->is_jmp = DISAS_UPDATE;
8985 break;
8986 case 0xa:
8987 case 0xb:
8989 int32_t offset;
8991 /* branch (and link) */
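/* The 24-bit immediate is shifted left twice and sign-extended to
 * a 26-bit byte offset (the sextract of insn << 2). s->pc already
 * points 4 past this insn, so the extra '+ 4' below gives the
 * architectural base of PC = insn address + 8.
 */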
8992 val = (int32_t)s->pc;
8993 if (insn & (1 << 24)) {
8994 tmp = tcg_temp_new_i32();
8995 tcg_gen_movi_i32(tmp, val);
8996 store_reg(s, 14, tmp);
8998 offset = sextract32(insn << 2, 0, 26);
8999 val += offset + 4;
9000 gen_jmp(s, val);
9002 break;
9003 case 0xc:
9004 case 0xd:
9005 case 0xe:
9006 if (((insn >> 8) & 0xe) == 10) {
9007 /* VFP. */
9008 if (disas_vfp_insn(s, insn)) {
9009 goto illegal_op;
9011 } else if (disas_coproc_insn(s, insn)) {
9012 /* Coprocessor. */
9013 goto illegal_op;
9015 break;
9016 case 0xf:
9017 /* swi */
9018 gen_set_pc_im(s, s->pc);
9019 s->svc_imm = extract32(insn, 0, 24);
9020 s->is_jmp = DISAS_SWI;
9021 break;
9022 default:
9023 illegal_op:
9024 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9025 break;
9030 /* Return true if this is a Thumb-2 logical op. */
9031 static int
9032 thumb2_logic_op(int op)
9034 return (op < 8);
9037 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9038 then set condition code flags based on the result of the operation.
9039 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9040 to the high bit of T1.
9041 Returns zero if the opcode is valid. */
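/* In this encoding ops 0-7 form the logical group (and, bic, orr,
 * orn, eor, ...), which is why thumb2_logic_op above is simply
 * op < 8; the remaining valid ops are the add/subtract family.
 */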
9043 static int
9044 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9045 TCGv_i32 t0, TCGv_i32 t1)
9047 int logic_cc;
9049 logic_cc = 0;
9050 switch (op) {
9051 case 0: /* and */
9052 tcg_gen_and_i32(t0, t0, t1);
9053 logic_cc = conds;
9054 break;
9055 case 1: /* bic */
9056 tcg_gen_andc_i32(t0, t0, t1);
9057 logic_cc = conds;
9058 break;
9059 case 2: /* orr */
9060 tcg_gen_or_i32(t0, t0, t1);
9061 logic_cc = conds;
9062 break;
9063 case 3: /* orn */
9064 tcg_gen_orc_i32(t0, t0, t1);
9065 logic_cc = conds;
9066 break;
9067 case 4: /* eor */
9068 tcg_gen_xor_i32(t0, t0, t1);
9069 logic_cc = conds;
9070 break;
9071 case 8: /* add */
9072 if (conds)
9073 gen_add_CC(t0, t0, t1);
9074 else
9075 tcg_gen_add_i32(t0, t0, t1);
9076 break;
9077 case 10: /* adc */
9078 if (conds)
9079 gen_adc_CC(t0, t0, t1);
9080 else
9081 gen_adc(t0, t1);
9082 break;
9083 case 11: /* sbc */
9084 if (conds) {
9085 gen_sbc_CC(t0, t0, t1);
9086 } else {
9087 gen_sub_carry(t0, t0, t1);
9089 break;
9090 case 13: /* sub */
9091 if (conds)
9092 gen_sub_CC(t0, t0, t1);
9093 else
9094 tcg_gen_sub_i32(t0, t0, t1);
9095 break;
9096 case 14: /* rsb */
9097 if (conds)
9098 gen_sub_CC(t0, t1, t0);
9099 else
9100 tcg_gen_sub_i32(t0, t1, t0);
9101 break;
9102 default: /* 5, 6, 7, 9, 12, 15. */
9103 return 1;
9105 if (logic_cc) {
9106 gen_logic_CC(t0);
9107 if (shifter_out)
9108 gen_set_CF_bit31(t1);
9110 return 0;
9113 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9114 is not legal. */
9115 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9117 uint32_t insn, imm, shift, offset;
9118 uint32_t rd, rn, rm, rs;
9119 TCGv_i32 tmp;
9120 TCGv_i32 tmp2;
9121 TCGv_i32 tmp3;
9122 TCGv_i32 addr;
9123 TCGv_i64 tmp64;
9124 int op;
9125 int shiftop;
9126 int conds;
9127 int logic_cc;
9129 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9130 || arm_dc_feature(s, ARM_FEATURE_M))) {
9131 /* Thumb-1 cores may need to treat bl and blx as a pair of
9132 16-bit instructions to get correct prefetch abort behavior. */
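/* Roughly: the first half (handled at the end of this block) loads
 * LR with PC plus the sign-extended high part of the offset; each
 * second-half form below then adds its low offset to LR, branches,
 * and leaves the new return address in LR.
 */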
9133 insn = insn_hw1;
9134 if ((insn & (1 << 12)) == 0) {
9135 ARCH(5);
9136 /* Second half of blx. */
9137 offset = ((insn & 0x7ff) << 1);
9138 tmp = load_reg(s, 14);
9139 tcg_gen_addi_i32(tmp, tmp, offset);
9140 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9142 tmp2 = tcg_temp_new_i32();
9143 tcg_gen_movi_i32(tmp2, s->pc | 1);
9144 store_reg(s, 14, tmp2);
9145 gen_bx(s, tmp);
9146 return 0;
9148 if (insn & (1 << 11)) {
9149 /* Second half of bl. */
9150 offset = ((insn & 0x7ff) << 1) | 1;
9151 tmp = load_reg(s, 14);
9152 tcg_gen_addi_i32(tmp, tmp, offset);
9154 tmp2 = tcg_temp_new_i32();
9155 tcg_gen_movi_i32(tmp2, s->pc | 1);
9156 store_reg(s, 14, tmp2);
9157 gen_bx(s, tmp);
9158 return 0;
9160 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9161 /* Instruction spans a page boundary. Implement it as two
9162 16-bit instructions in case the second half causes a
9163 prefetch abort. */
9164 offset = ((int32_t)insn << 21) >> 9;
9165 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9166 return 0;
9168 /* Fall through to 32-bit decode. */
9171 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9172 s->pc += 2;
9173 insn |= (uint32_t)insn_hw1 << 16;
9175 if ((insn & 0xf800e800) != 0xf000e800) {
9176 ARCH(6T2);
9179 rn = (insn >> 16) & 0xf;
9180 rs = (insn >> 12) & 0xf;
9181 rd = (insn >> 8) & 0xf;
9182 rm = insn & 0xf;
9183 switch ((insn >> 25) & 0xf) {
9184 case 0: case 1: case 2: case 3:
9185 /* 16-bit instructions. Should never happen. */
9186 abort();
9187 case 4:
9188 if (insn & (1 << 22)) {
9189 /* Other load/store, table branch. */
9190 if (insn & 0x01200000) {
9191 /* Load/store doubleword. */
9192 if (rn == 15) {
9193 addr = tcg_temp_new_i32();
9194 tcg_gen_movi_i32(addr, s->pc & ~3);
9195 } else {
9196 addr = load_reg(s, rn);
9198 offset = (insn & 0xff) * 4;
9199 if ((insn & (1 << 23)) == 0)
9200 offset = -offset;
9201 if (insn & (1 << 24)) {
9202 tcg_gen_addi_i32(addr, addr, offset);
9203 offset = 0;
9205 if (insn & (1 << 20)) {
9206 /* ldrd */
9207 tmp = tcg_temp_new_i32();
9208 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9209 store_reg(s, rs, tmp);
9210 tcg_gen_addi_i32(addr, addr, 4);
9211 tmp = tcg_temp_new_i32();
9212 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9213 store_reg(s, rd, tmp);
9214 } else {
9215 /* strd */
9216 tmp = load_reg(s, rs);
9217 gen_aa32_st32(tmp, addr, get_mem_index(s));
9218 tcg_temp_free_i32(tmp);
9219 tcg_gen_addi_i32(addr, addr, 4);
9220 tmp = load_reg(s, rd);
9221 gen_aa32_st32(tmp, addr, get_mem_index(s));
9222 tcg_temp_free_i32(tmp);
9224 if (insn & (1 << 21)) {
9225 /* Base writeback. */
9226 if (rn == 15)
9227 goto illegal_op;
9228 tcg_gen_addi_i32(addr, addr, offset - 4);
9229 store_reg(s, rn, addr);
9230 } else {
9231 tcg_temp_free_i32(addr);
9233 } else if ((insn & (1 << 23)) == 0) {
9234 /* Load/store exclusive word. */
9235 addr = tcg_temp_local_new_i32();
9236 load_reg_var(s, addr, rn);
9237 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
9238 if (insn & (1 << 20)) {
9239 gen_load_exclusive(s, rs, 15, addr, 2);
9240 } else {
9241 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9243 tcg_temp_free_i32(addr);
9244 } else if ((insn & (7 << 5)) == 0) {
9245 /* Table Branch. */
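/* TBB/TBH branch via a table of byte (TBB) or halfword (TBH)
 * offsets at Rn + Rm (Rn + Rm*2 for TBH, hence Rm is added twice
 * below). The loaded entry is doubled and added to PC, giving a
 * forward range of up to 510 bytes (TBB) or 131070 bytes (TBH).
 */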
9246 if (rn == 15) {
9247 addr = tcg_temp_new_i32();
9248 tcg_gen_movi_i32(addr, s->pc);
9249 } else {
9250 addr = load_reg(s, rn);
9252 tmp = load_reg(s, rm);
9253 tcg_gen_add_i32(addr, addr, tmp);
9254 if (insn & (1 << 4)) {
9255 /* tbh */
9256 tcg_gen_add_i32(addr, addr, tmp);
9257 tcg_temp_free_i32(tmp);
9258 tmp = tcg_temp_new_i32();
9259 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9260 } else { /* tbb */
9261 tcg_temp_free_i32(tmp);
9262 tmp = tcg_temp_new_i32();
9263 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9265 tcg_temp_free_i32(addr);
9266 tcg_gen_shli_i32(tmp, tmp, 1);
9267 tcg_gen_addi_i32(tmp, tmp, s->pc);
9268 store_reg(s, 15, tmp);
9269 } else {
9270 int op2 = (insn >> 6) & 0x3;
9271 op = (insn >> 4) & 0x3;
9272 switch (op2) {
9273 case 0:
9274 goto illegal_op;
9275 case 1:
9276 /* Load/store exclusive byte/halfword/doubleword */
9277 if (op == 2) {
9278 goto illegal_op;
9280 ARCH(7);
9281 break;
9282 case 2:
9283 /* Load-acquire/store-release */
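/* Note: the acquire/release forms (LDAB/LDAH/LDA, STLB/STLH/STL
 * below) are generated as plain accesses here; their ordering
 * semantics are assumed to be covered by TCG's sequential
 * execution rather than explicit barriers.
 */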
9284 if (op == 3) {
9285 goto illegal_op;
9287 /* Fall through */
9288 case 3:
9289 /* Load-acquire/store-release exclusive */
9290 ARCH(8);
9291 break;
9293 addr = tcg_temp_local_new_i32();
9294 load_reg_var(s, addr, rn);
9295 if (!(op2 & 1)) {
9296 if (insn & (1 << 20)) {
9297 tmp = tcg_temp_new_i32();
9298 switch (op) {
9299 case 0: /* ldab */
9300 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9301 break;
9302 case 1: /* ldah */
9303 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9304 break;
9305 case 2: /* lda */
9306 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9307 break;
9308 default:
9309 abort();
9311 store_reg(s, rs, tmp);
9312 } else {
9313 tmp = load_reg(s, rs);
9314 switch (op) {
9315 case 0: /* stlb */
9316 gen_aa32_st8(tmp, addr, get_mem_index(s));
9317 break;
9318 case 1: /* stlh */
9319 gen_aa32_st16(tmp, addr, get_mem_index(s));
9320 break;
9321 case 2: /* stl */
9322 gen_aa32_st32(tmp, addr, get_mem_index(s));
9323 break;
9324 default:
9325 abort();
9327 tcg_temp_free_i32(tmp);
9329 } else if (insn & (1 << 20)) {
9330 gen_load_exclusive(s, rs, rd, addr, op);
9331 } else {
9332 gen_store_exclusive(s, rm, rs, rd, addr, op);
9334 tcg_temp_free_i32(addr);
9336 } else {
9337 /* Load/store multiple, RFE, SRS. */
9338 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
9339 /* RFE, SRS: not available in user mode or on M profile */
9340 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9341 goto illegal_op;
9343 if (insn & (1 << 20)) {
9344 /* rfe */
9345 addr = load_reg(s, rn);
9346 if ((insn & (1 << 24)) == 0)
9347 tcg_gen_addi_i32(addr, addr, -8);
9348 /* Load PC into tmp and CPSR into tmp2. */
9349 tmp = tcg_temp_new_i32();
9350 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9351 tcg_gen_addi_i32(addr, addr, 4);
9352 tmp2 = tcg_temp_new_i32();
9353 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9354 if (insn & (1 << 21)) {
9355 /* Base writeback. */
9356 if (insn & (1 << 24)) {
9357 tcg_gen_addi_i32(addr, addr, 4);
9358 } else {
9359 tcg_gen_addi_i32(addr, addr, -4);
9361 store_reg(s, rn, addr);
9362 } else {
9363 tcg_temp_free_i32(addr);
9365 gen_rfe(s, tmp, tmp2);
9366 } else {
9367 /* srs */
9368 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9369 insn & (1 << 21));
9371 } else {
9372 int i, loaded_base = 0;
9373 TCGv_i32 loaded_var;
9374 /* Load/store multiple. */
9375 addr = load_reg(s, rn);
9376 offset = 0;
9377 for (i = 0; i < 16; i++) {
9378 if (insn & (1 << i))
9379 offset += 4;
9381 if (insn & (1 << 24)) {
9382 tcg_gen_addi_i32(addr, addr, -offset);
9385 TCGV_UNUSED_I32(loaded_var);
9386 for (i = 0; i < 16; i++) {
9387 if ((insn & (1 << i)) == 0)
9388 continue;
9389 if (insn & (1 << 20)) {
9390 /* Load. */
9391 tmp = tcg_temp_new_i32();
9392 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9393 if (i == 15) {
9394 gen_bx(s, tmp);
9395 } else if (i == rn) {
9396 loaded_var = tmp;
9397 loaded_base = 1;
9398 } else {
9399 store_reg(s, i, tmp);
9401 } else {
9402 /* Store. */
9403 tmp = load_reg(s, i);
9404 gen_aa32_st32(tmp, addr, get_mem_index(s));
9405 tcg_temp_free_i32(tmp);
9407 tcg_gen_addi_i32(addr, addr, 4);
9409 if (loaded_base) {
9410 store_reg(s, rn, loaded_var);
9412 if (insn & (1 << 21)) {
9413 /* Base register writeback. */
9414 if (insn & (1 << 24)) {
9415 tcg_gen_addi_i32(addr, addr, -offset);
9417 /* Fault if writeback register is in register list. */
9418 if (insn & (1 << rn))
9419 goto illegal_op;
9420 store_reg(s, rn, addr);
9421 } else {
9422 tcg_temp_free_i32(addr);
9426 break;
9427 case 5:
9429 op = (insn >> 21) & 0xf;
9430 if (op == 6) {
9431 /* Halfword pack. */
9432 tmp = load_reg(s, rn);
9433 tmp2 = load_reg(s, rm);
9434 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9435 if (insn & (1 << 5)) {
9436 /* pkhtb */
9437 if (shift == 0)
9438 shift = 31;
9439 tcg_gen_sari_i32(tmp2, tmp2, shift);
9440 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9441 tcg_gen_ext16u_i32(tmp2, tmp2);
9442 } else {
9443 /* pkhbt */
9444 if (shift)
9445 tcg_gen_shli_i32(tmp2, tmp2, shift);
9446 tcg_gen_ext16u_i32(tmp, tmp);
9447 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9449 tcg_gen_or_i32(tmp, tmp, tmp2);
9450 tcg_temp_free_i32(tmp2);
9451 store_reg(s, rd, tmp);
9452 } else {
9453 /* Data processing register constant shift. */
9454 if (rn == 15) {
9455 tmp = tcg_temp_new_i32();
9456 tcg_gen_movi_i32(tmp, 0);
9457 } else {
9458 tmp = load_reg(s, rn);
9460 tmp2 = load_reg(s, rm);
9462 shiftop = (insn >> 4) & 3;
9463 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9464 conds = (insn & (1 << 20)) != 0;
9465 logic_cc = (conds && thumb2_logic_op(op));
9466 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9467 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9468 goto illegal_op;
9469 tcg_temp_free_i32(tmp2);
9470 if (rd != 15) {
9471 store_reg(s, rd, tmp);
9472 } else {
9473 tcg_temp_free_i32(tmp);
9476 break;
9477 case 13: /* Misc data processing. */
9478 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9479 if (op < 4 && (insn & 0xf000) != 0xf000)
9480 goto illegal_op;
9481 switch (op) {
9482 case 0: /* Register controlled shift. */
9483 tmp = load_reg(s, rn);
9484 tmp2 = load_reg(s, rm);
9485 if ((insn & 0x70) != 0)
9486 goto illegal_op;
9487 op = (insn >> 21) & 3;
9488 logic_cc = (insn & (1 << 20)) != 0;
9489 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9490 if (logic_cc)
9491 gen_logic_CC(tmp);
9492 store_reg_bx(s, rd, tmp);
9493 break;
9494 case 1: /* Sign/zero extend. */
9495 tmp = load_reg(s, rm);
9496 shift = (insn >> 4) & 3;
9497 /* ??? In many cases it's not necessary to do a
9498 rotate, a shift is sufficient. */
9499 if (shift != 0)
9500 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9501 op = (insn >> 20) & 7;
9502 switch (op) {
9503 case 0: gen_sxth(tmp); break;
9504 case 1: gen_uxth(tmp); break;
9505 case 2: gen_sxtb16(tmp); break;
9506 case 3: gen_uxtb16(tmp); break;
9507 case 4: gen_sxtb(tmp); break;
9508 case 5: gen_uxtb(tmp); break;
9509 default: goto illegal_op;
9511 if (rn != 15) {
9512 tmp2 = load_reg(s, rn);
9513 if ((op >> 1) == 1) {
9514 gen_add16(tmp, tmp2);
9515 } else {
9516 tcg_gen_add_i32(tmp, tmp, tmp2);
9517 tcg_temp_free_i32(tmp2);
9520 store_reg(s, rd, tmp);
9521 break;
9522 case 2: /* SIMD add/subtract. */
9523 op = (insn >> 20) & 7;
9524 shift = (insn >> 4) & 7;
9525 if ((op & 3) == 3 || (shift & 3) == 3)
9526 goto illegal_op;
9527 tmp = load_reg(s, rn);
9528 tmp2 = load_reg(s, rm);
9529 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
9530 tcg_temp_free_i32(tmp2);
9531 store_reg(s, rd, tmp);
9532 break;
9533 case 3: /* Other data processing. */
9534 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9535 if (op < 4) {
9536 /* Saturating add/subtract. */
9537 tmp = load_reg(s, rn);
9538 tmp2 = load_reg(s, rm);
9539 if (op & 1)
9540 gen_helper_double_saturate(tmp, cpu_env, tmp);
9541 if (op & 2)
9542 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9543 else
9544 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
9545 tcg_temp_free_i32(tmp2);
9546 } else {
9547 tmp = load_reg(s, rn);
9548 switch (op) {
9549 case 0x0a: /* rbit */
9550 gen_helper_rbit(tmp, tmp);
9551 break;
9552 case 0x08: /* rev */
9553 tcg_gen_bswap32_i32(tmp, tmp);
9554 break;
9555 case 0x09: /* rev16 */
9556 gen_rev16(tmp);
9557 break;
9558 case 0x0b: /* revsh */
9559 gen_revsh(tmp);
9560 break;
9561 case 0x10: /* sel */
9562 tmp2 = load_reg(s, rm);
9563 tmp3 = tcg_temp_new_i32();
9564 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
9565 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
9566 tcg_temp_free_i32(tmp3);
9567 tcg_temp_free_i32(tmp2);
9568 break;
9569 case 0x18: /* clz */
9570 gen_helper_clz(tmp, tmp);
9571 break;
9572 case 0x20:
9573 case 0x21:
9574 case 0x22:
9575 case 0x28:
9576 case 0x29:
9577 case 0x2a:
9579 /* crc32/crc32c */
9580 uint32_t sz = op & 0x3;
9581 uint32_t c = op & 0x8;
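/* 'sz' selects the operand width (1 << sz bytes is passed to the
 * helper) and 'c' picks the CRC-32C (Castagnoli) polynomial over
 * plain CRC-32; narrower operands are masked down first.
 */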
9583 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
9584 goto illegal_op;
9587 tmp2 = load_reg(s, rm);
9588 if (sz == 0) {
9589 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9590 } else if (sz == 1) {
9591 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9593 tmp3 = tcg_const_i32(1 << sz);
9594 if (c) {
9595 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9596 } else {
9597 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9599 tcg_temp_free_i32(tmp2);
9600 tcg_temp_free_i32(tmp3);
9601 break;
9603 default:
9604 goto illegal_op;
9607 store_reg(s, rd, tmp);
9608 break;
9609 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9610 op = (insn >> 4) & 0xf;
9611 tmp = load_reg(s, rn);
9612 tmp2 = load_reg(s, rm);
9613 switch ((insn >> 20) & 7) {
9614 case 0: /* 32 x 32 -> 32 */
9615 tcg_gen_mul_i32(tmp, tmp, tmp2);
9616 tcg_temp_free_i32(tmp2);
9617 if (rs != 15) {
9618 tmp2 = load_reg(s, rs);
9619 if (op)
9620 tcg_gen_sub_i32(tmp, tmp2, tmp);
9621 else
9622 tcg_gen_add_i32(tmp, tmp, tmp2);
9623 tcg_temp_free_i32(tmp2);
9625 break;
9626 case 1: /* 16 x 16 -> 32 */
9627 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9628 tcg_temp_free_i32(tmp2);
9629 if (rs != 15) {
9630 tmp2 = load_reg(s, rs);
9631 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9632 tcg_temp_free_i32(tmp2);
9634 break;
9635 case 2: /* Dual multiply add. */
9636 case 4: /* Dual multiply subtract. */
9637 if (op)
9638 gen_swap_half(tmp2);
9639 gen_smul_dual(tmp, tmp2);
9640 if (insn & (1 << 22)) {
9641 /* This subtraction cannot overflow. */
9642 tcg_gen_sub_i32(tmp, tmp, tmp2);
9643 } else {
9644 /* This addition cannot overflow 32 bits;
9645 * however it may overflow considered as a signed
9646 * operation, in which case we must set the Q flag.
9647 */
9648 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9650 tcg_temp_free_i32(tmp2);
9651 if (rs != 15)
9653 tmp2 = load_reg(s, rs);
9654 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9655 tcg_temp_free_i32(tmp2);
9657 break;
9658 case 3: /* 32 * 16 -> 32msb */
9659 if (op)
9660 tcg_gen_sari_i32(tmp2, tmp2, 16);
9661 else
9662 gen_sxth(tmp2);
9663 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9664 tcg_gen_shri_i64(tmp64, tmp64, 16);
9665 tmp = tcg_temp_new_i32();
9666 tcg_gen_trunc_i64_i32(tmp, tmp64);
9667 tcg_temp_free_i64(tmp64);
9668 if (rs != 15)
9670 tmp2 = load_reg(s, rs);
9671 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9672 tcg_temp_free_i32(tmp2);
9674 break;
9675 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9676 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9677 if (rs != 15) {
9678 tmp = load_reg(s, rs);
9679 if (insn & (1 << 20)) {
9680 tmp64 = gen_addq_msw(tmp64, tmp);
9681 } else {
9682 tmp64 = gen_subq_msw(tmp64, tmp);
9685 if (insn & (1 << 4)) {
9686 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9688 tcg_gen_shri_i64(tmp64, tmp64, 32);
9689 tmp = tcg_temp_new_i32();
9690 tcg_gen_trunc_i64_i32(tmp, tmp64);
9691 tcg_temp_free_i64(tmp64);
9692 break;
9693 case 7: /* Unsigned sum of absolute differences. */
9694 gen_helper_usad8(tmp, tmp, tmp2);
9695 tcg_temp_free_i32(tmp2);
9696 if (rs != 15) {
9697 tmp2 = load_reg(s, rs);
9698 tcg_gen_add_i32(tmp, tmp, tmp2);
9699 tcg_temp_free_i32(tmp2);
9701 break;
9703 store_reg(s, rd, tmp);
9704 break;
9705 case 6: case 7: /* 64-bit multiply, Divide. */
9706 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
9707 tmp = load_reg(s, rn);
9708 tmp2 = load_reg(s, rm);
9709 if ((op & 0x50) == 0x10) {
9710 /* sdiv, udiv */
9711 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9712 goto illegal_op;
9714 if (op & 0x20)
9715 gen_helper_udiv(tmp, tmp, tmp2);
9716 else
9717 gen_helper_sdiv(tmp, tmp, tmp2);
9718 tcg_temp_free_i32(tmp2);
9719 store_reg(s, rd, tmp);
9720 } else if ((op & 0xe) == 0xc) {
9721 /* Dual multiply accumulate long. */
9722 if (op & 1)
9723 gen_swap_half(tmp2);
9724 gen_smul_dual(tmp, tmp2);
9725 if (op & 0x10) {
9726 tcg_gen_sub_i32(tmp, tmp, tmp2);
9727 } else {
9728 tcg_gen_add_i32(tmp, tmp, tmp2);
9730 tcg_temp_free_i32(tmp2);
9731 /* BUGFIX: sign-extend to 64 bits before the long accumulate */
9732 tmp64 = tcg_temp_new_i64();
9733 tcg_gen_ext_i32_i64(tmp64, tmp);
9734 tcg_temp_free_i32(tmp);
9735 gen_addq(s, tmp64, rs, rd);
9736 gen_storeq_reg(s, rs, rd, tmp64);
9737 tcg_temp_free_i64(tmp64);
9738 } else {
9739 if (op & 0x20) {
9740 /* Unsigned 64-bit multiply */
9741 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9742 } else {
9743 if (op & 8) {
9744 /* smlalxy */
9745 gen_mulxy(tmp, tmp2, op & 2, op & 1);
9746 tcg_temp_free_i32(tmp2);
9747 tmp64 = tcg_temp_new_i64();
9748 tcg_gen_ext_i32_i64(tmp64, tmp);
9749 tcg_temp_free_i32(tmp);
9750 } else {
9751 /* Signed 64-bit multiply */
9752 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9755 if (op & 4) {
9756 /* umaal */
9757 gen_addq_lo(s, tmp64, rs);
9758 gen_addq_lo(s, tmp64, rd);
9759 } else if (op & 0x40) {
9760 /* 64-bit accumulate. */
9761 gen_addq(s, tmp64, rs, rd);
9763 gen_storeq_reg(s, rs, rd, tmp64);
9764 tcg_temp_free_i64(tmp64);
9766 break;
9768 break;
9769 case 6: case 7: case 14: case 15:
9770 /* Coprocessor. */
9771 if (((insn >> 24) & 3) == 3) {
9772 /* Translate into the equivalent ARM encoding. */
9773 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9774 if (disas_neon_data_insn(s, insn)) {
9775 goto illegal_op;
9777 } else if (((insn >> 8) & 0xe) == 10) {
9778 if (disas_vfp_insn(s, insn)) {
9779 goto illegal_op;
9781 } else {
9782 if (insn & (1 << 28))
9783 goto illegal_op;
9784 if (disas_coproc_insn(s, insn)) {
9785 goto illegal_op;
9788 break;
9789 case 8: case 9: case 10: case 11:
9790 if (insn & (1 << 15)) {
9791 /* Branches, misc control. */
9792 if (insn & 0x5000) {
9793 /* Unconditional branch. */
9794 /* signextend(hw1[10:0]) -> offset[:12]. */
9795 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9796 /* hw1[10:0] -> offset[11:1]. */
9797 offset |= (insn & 0x7ff) << 1;
9798 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9799 offset[24:22] already have the same value because of the
9800 sign extension above. */
9801 offset ^= ((~insn) & (1 << 13)) << 10;
9802 offset ^= ((~insn) & (1 << 11)) << 11;
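/* The two XORs implement I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S):
 * the sign extension left offset[23:22] equal to S, and toggling
 * them where J1/J2 are clear reproduces the architectural bits.
 */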
9804 if (insn & (1 << 14)) {
9805 /* Branch and link. */
9806 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
9809 offset += s->pc;
9810 if (insn & (1 << 12)) {
9811 /* b/bl */
9812 gen_jmp(s, offset);
9813 } else {
9814 /* blx */
9815 offset &= ~(uint32_t)2;
9816 /* thumb2 bx, no need to check */
9817 gen_bx_im(s, offset);
9819 } else if (((insn >> 23) & 7) == 7) {
9820 /* Misc control */
9821 if (insn & (1 << 13))
9822 goto illegal_op;
9824 if (insn & (1 << 26)) {
9825 if (!(insn & (1 << 20))) {
9826 /* Hypervisor call (v7) */
9827 int imm16 = extract32(insn, 16, 4) << 12
9828 | extract32(insn, 0, 12);
9829 ARCH(7);
9830 if (IS_USER(s)) {
9831 goto illegal_op;
9833 gen_hvc(s, imm16);
9834 } else {
9835 /* Secure monitor call (v6+) */
9836 ARCH(6K);
9837 if (IS_USER(s)) {
9838 goto illegal_op;
9840 gen_smc(s);
9842 } else {
9843 op = (insn >> 20) & 7;
9844 switch (op) {
9845 case 0: /* msr cpsr. */
9846 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9847 tmp = load_reg(s, rn);
9848 addr = tcg_const_i32(insn & 0xff);
9849 gen_helper_v7m_msr(cpu_env, addr, tmp);
9850 tcg_temp_free_i32(addr);
9851 tcg_temp_free_i32(tmp);
9852 gen_lookup_tb(s);
9853 break;
9855 /* fall through */
9856 case 1: /* msr spsr. */
9857 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9858 goto illegal_op;
9860 tmp = load_reg(s, rn);
9861 if (gen_set_psr(s,
9862 msr_mask(s, (insn >> 8) & 0xf, op == 1),
9863 op == 1, tmp))
9864 goto illegal_op;
9865 break;
9866 case 2: /* cps, nop-hint. */
9867 if (((insn >> 8) & 7) == 0) {
9868 gen_nop_hint(s, insn & 0xff);
9870 /* Implemented as NOP in user mode. */
9871 if (IS_USER(s))
9872 break;
9873 offset = 0;
9874 imm = 0;
9875 if (insn & (1 << 10)) {
9876 if (insn & (1 << 7))
9877 offset |= CPSR_A;
9878 if (insn & (1 << 6))
9879 offset |= CPSR_I;
9880 if (insn & (1 << 5))
9881 offset |= CPSR_F;
9882 if (insn & (1 << 9))
9883 imm = CPSR_A | CPSR_I | CPSR_F;
9885 if (insn & (1 << 8)) {
9886 offset |= 0x1f;
9887 imm |= (insn & 0x1f);
9889 if (offset) {
9890 gen_set_psr_im(s, offset, 0, imm);
9892 break;
9893 case 3: /* Special control operations. */
9894 ARCH(7);
9895 op = (insn >> 4) & 0xf;
9896 switch (op) {
9897 case 2: /* clrex */
9898 gen_clrex(s);
9899 break;
9900 case 4: /* dsb */
9901 case 5: /* dmb */
9902 case 6: /* isb */
9903 /* These execute as NOPs. */
9904 break;
9905 default:
9906 goto illegal_op;
9908 break;
9909 case 4: /* bxj */
9910 /* Trivial implementation equivalent to bx. */
9911 tmp = load_reg(s, rn);
9912 gen_bx(s, tmp);
9913 break;
9914 case 5: /* Exception return. */
9915 if (IS_USER(s)) {
9916 goto illegal_op;
9918 if (rn != 14 || rd != 15) {
9919 goto illegal_op;
9921 tmp = load_reg(s, rn);
9922 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9923 gen_exception_return(s, tmp);
9924 break;
9925 case 6: /* mrs cpsr. */
9926 tmp = tcg_temp_new_i32();
9927 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9928 addr = tcg_const_i32(insn & 0xff);
9929 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9930 tcg_temp_free_i32(addr);
9931 } else {
9932 gen_helper_cpsr_read(tmp, cpu_env);
9934 store_reg(s, rd, tmp);
9935 break;
9936 case 7: /* mrs spsr. */
9937 /* Not accessible in user mode. */
9938 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9939 goto illegal_op;
9941 tmp = load_cpu_field(spsr);
9942 store_reg(s, rd, tmp);
9943 break;
9946 } else {
9947 /* Conditional branch. */
9948 op = (insn >> 22) & 0xf;
9949 /* Generate a conditional jump to next instruction. */
9950 s->condlabel = gen_new_label();
9951 arm_gen_test_cc(op ^ 1, s->condlabel);
9952 s->condjmp = 1;
9954 /* offset[11:1] = insn[10:0] */
9955 offset = (insn & 0x7ff) << 1;
9956 /* offset[17:12] = insn[21:16]. */
9957 offset |= (insn & 0x003f0000) >> 4;
9958 /* offset[31:20] = insn[26]. */
9959 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9960 /* offset[18] = insn[13]. */
9961 offset |= (insn & (1 << 13)) << 5;
9962 /* offset[19] = insn[11]. */
9963 offset |= (insn & (1 << 11)) << 8;
9965 /* jump to the offset */
9966 gen_jmp(s, s->pc + offset);
9968 } else {
9969 /* Data processing immediate. */
9970 if (insn & (1 << 25)) {
9971 if (insn & (1 << 24)) {
9972 if (insn & (1 << 20))
9973 goto illegal_op;
9974 /* Bitfield/Saturate. */
9975 op = (insn >> 21) & 7;
9976 imm = insn & 0x1f;
9977 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9978 if (rn == 15) {
9979 tmp = tcg_temp_new_i32();
9980 tcg_gen_movi_i32(tmp, 0);
9981 } else {
9982 tmp = load_reg(s, rn);
9984 switch (op) {
9985 case 2: /* Signed bitfield extract. */
9986 imm++;
9987 if (shift + imm > 32)
9988 goto illegal_op;
9989 if (imm < 32)
9990 gen_sbfx(tmp, shift, imm);
9991 break;
9992 case 6: /* Unsigned bitfield extract. */
9993 imm++;
9994 if (shift + imm > 32)
9995 goto illegal_op;
9996 if (imm < 32)
9997 gen_ubfx(tmp, shift, (1u << imm) - 1);
9998 break;
9999 case 3: /* Bitfield insert/clear. */
10000 if (imm < shift)
10001 goto illegal_op;
10002 imm = imm + 1 - shift;
10003 if (imm != 32) {
10004 tmp2 = load_reg(s, rd);
10005 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
10006 tcg_temp_free_i32(tmp2);
10008 break;
10009 case 7:
10010 goto illegal_op;
10011 default: /* Saturate. */
10012 if (shift) {
10013 if (op & 1)
10014 tcg_gen_sari_i32(tmp, tmp, shift);
10015 else
10016 tcg_gen_shli_i32(tmp, tmp, shift);
10018 tmp2 = tcg_const_i32(imm);
10019 if (op & 4) {
10020 /* Unsigned. */
10021 if ((op & 1) && shift == 0)
10022 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
10023 else
10024 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
10025 } else {
10026 /* Signed. */
10027 if ((op & 1) && shift == 0)
10028 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
10029 else
10030 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
10032 tcg_temp_free_i32(tmp2);
10033 break;
10035 store_reg(s, rd, tmp);
10036 } else {
10037 imm = ((insn & 0x04000000) >> 15)
10038 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10039 if (insn & (1 << 22)) {
10040 /* 16-bit immediate. */
10041 imm |= (insn >> 4) & 0xf000;
10042 if (insn & (1 << 23)) {
10043 /* movt */
10044 tmp = load_reg(s, rd);
10045 tcg_gen_ext16u_i32(tmp, tmp);
10046 tcg_gen_ori_i32(tmp, tmp, imm << 16);
10047 } else {
10048 /* movw */
10049 tmp = tcg_temp_new_i32();
10050 tcg_gen_movi_i32(tmp, imm);
10052 } else {
10053 /* Add/sub 12-bit immediate. */
10054 if (rn == 15) {
10055 offset = s->pc & ~(uint32_t)3;
10056 if (insn & (1 << 23))
10057 offset -= imm;
10058 else
10059 offset += imm;
10060 tmp = tcg_temp_new_i32();
10061 tcg_gen_movi_i32(tmp, offset);
10062 } else {
10063 tmp = load_reg(s, rn);
10064 if (insn & (1 << 23))
10065 tcg_gen_subi_i32(tmp, tmp, imm);
10066 else
10067 tcg_gen_addi_i32(tmp, tmp, imm);
10070 store_reg(s, rd, tmp);
10072 } else {
10073 int shifter_out = 0;
10074 /* modified 12-bit immediate. */
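/* Thumb-2 modified immediates: codes 0-3 replicate the byte as
 * shown below; higher codes encode 1:imm8[6:0] rotated right by
 * the 5-bit count. E.g. count 8 with imm8[6:0] = 0x55:
 * imm |= 0x80 gives 0xd5, and 0xd5 << (32 - 8) = 0xd5000000,
 * i.e. 0xd5 rotated right by 8.
 */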
10075 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10076 imm = (insn & 0xff);
10077 switch (shift) {
10078 case 0: /* XY */
10079 /* Nothing to do. */
10080 break;
10081 case 1: /* 00XY00XY */
10082 imm |= imm << 16;
10083 break;
10084 case 2: /* XY00XY00 */
10085 imm |= imm << 16;
10086 imm <<= 8;
10087 break;
10088 case 3: /* XYXYXYXY */
10089 imm |= imm << 16;
10090 imm |= imm << 8;
10091 break;
10092 default: /* Rotated constant. */
10093 shift = (shift << 1) | (imm >> 7);
10094 imm |= 0x80;
10095 imm = imm << (32 - shift);
10096 shifter_out = 1;
10097 break;
10099 tmp2 = tcg_temp_new_i32();
10100 tcg_gen_movi_i32(tmp2, imm);
10101 rn = (insn >> 16) & 0xf;
10102 if (rn == 15) {
10103 tmp = tcg_temp_new_i32();
10104 tcg_gen_movi_i32(tmp, 0);
10105 } else {
10106 tmp = load_reg(s, rn);
10108 op = (insn >> 21) & 0xf;
10109 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
10110 shifter_out, tmp, tmp2))
10111 goto illegal_op;
10112 tcg_temp_free_i32(tmp2);
10113 rd = (insn >> 8) & 0xf;
10114 if (rd != 15) {
10115 store_reg(s, rd, tmp);
10116 } else {
10117 tcg_temp_free_i32(tmp);
10121 break;
10122 case 12: /* Load/store single data item. */
10124 int postinc = 0;
10125 int writeback = 0;
10126 int memidx;
10127 if ((insn & 0x01100000) == 0x01000000) {
10128 if (disas_neon_ls_insn(s, insn)) {
10129 goto illegal_op;
10131 break;
10133 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10134 if (rs == 15) {
10135 if (!(insn & (1 << 20))) {
10136 goto illegal_op;
10138 if (op != 2) {
10139 /* Byte or halfword load space with dest == r15 : memory hints.
10140 * Catch them early so we don't emit pointless addressing code.
10141 * This space is a mix of:
10142 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10143 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10144 * cores)
10145 * unallocated hints, which must be treated as NOPs
10146 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10147 * which is easiest for the decoding logic
10148 * Some space which must UNDEF
10149 */
10150 int op1 = (insn >> 23) & 3;
10151 int op2 = (insn >> 6) & 0x3f;
10152 if (op & 2) {
10153 goto illegal_op;
10155 if (rn == 15) {
10156 /* UNPREDICTABLE, unallocated hint or
10157 * PLD/PLDW/PLI (literal)
10158 */
10159 return 0;
10161 if (op1 & 1) {
10162 return 0; /* PLD/PLDW/PLI or unallocated hint */
10164 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
10165 return 0; /* PLD/PLDW/PLI or unallocated hint */
10167 /* UNDEF space, or an UNPREDICTABLE */
10168 return 1;
10171 memidx = get_mem_index(s);
10172 if (rn == 15) {
10173 addr = tcg_temp_new_i32();
10174 /* PC relative. */
10175 /* s->pc has already been incremented by 4. */
10176 imm = s->pc & 0xfffffffc;
10177 if (insn & (1 << 23))
10178 imm += insn & 0xfff;
10179 else
10180 imm -= insn & 0xfff;
10181 tcg_gen_movi_i32(addr, imm);
10182 } else {
10183 addr = load_reg(s, rn);
10184 if (insn & (1 << 23)) {
10185 /* Positive offset. */
10186 imm = insn & 0xfff;
10187 tcg_gen_addi_i32(addr, addr, imm);
10188 } else {
10189 imm = insn & 0xff;
10190 switch ((insn >> 8) & 0xf) {
10191 case 0x0: /* Shifted Register. */
10192 shift = (insn >> 4) & 0xf;
10193 if (shift > 3) {
10194 tcg_temp_free_i32(addr);
10195 goto illegal_op;
10197 tmp = load_reg(s, rm);
10198 if (shift)
10199 tcg_gen_shli_i32(tmp, tmp, shift);
10200 tcg_gen_add_i32(addr, addr, tmp);
10201 tcg_temp_free_i32(tmp);
10202 break;
10203 case 0xc: /* Negative offset. */
10204 tcg_gen_addi_i32(addr, addr, -imm);
10205 break;
10206 case 0xe: /* User privilege. */
10207 tcg_gen_addi_i32(addr, addr, imm);
10208 memidx = get_a32_user_mem_index(s);
10209 break;
10210 case 0x9: /* Post-decrement. */
10211 imm = -imm;
10212 /* Fall through. */
10213 case 0xb: /* Post-increment. */
10214 postinc = 1;
10215 writeback = 1;
10216 break;
10217 case 0xd: /* Pre-decrement. */
10218 imm = -imm;
10219 /* Fall through. */
10220 case 0xf: /* Pre-increment. */
10221 tcg_gen_addi_i32(addr, addr, imm);
10222 writeback = 1;
10223 break;
10224 default:
10225 tcg_temp_free_i32(addr);
10226 goto illegal_op;
10230 if (insn & (1 << 20)) {
10231 /* Load. */
10232 tmp = tcg_temp_new_i32();
10233 switch (op) {
10234 case 0:
10235 gen_aa32_ld8u(tmp, addr, memidx);
10236 break;
10237 case 4:
10238 gen_aa32_ld8s(tmp, addr, memidx);
10239 break;
10240 case 1:
10241 gen_aa32_ld16u(tmp, addr, memidx);
10242 break;
10243 case 5:
10244 gen_aa32_ld16s(tmp, addr, memidx);
10245 break;
10246 case 2:
10247 gen_aa32_ld32u(tmp, addr, memidx);
10248 break;
10249 default:
10250 tcg_temp_free_i32(tmp);
10251 tcg_temp_free_i32(addr);
10252 goto illegal_op;
10254 if (rs == 15) {
10255 gen_bx(s, tmp);
10256 } else {
10257 store_reg(s, rs, tmp);
10259 } else {
10260 /* Store. */
10261 tmp = load_reg(s, rs);
10262 switch (op) {
10263 case 0:
10264 gen_aa32_st8(tmp, addr, memidx);
10265 break;
10266 case 1:
10267 gen_aa32_st16(tmp, addr, memidx);
10268 break;
10269 case 2:
10270 gen_aa32_st32(tmp, addr, memidx);
10271 break;
10272 default:
10273 tcg_temp_free_i32(tmp);
10274 tcg_temp_free_i32(addr);
10275 goto illegal_op;
10277 tcg_temp_free_i32(tmp);
10279 if (postinc)
10280 tcg_gen_addi_i32(addr, addr, imm);
10281 if (writeback) {
10282 store_reg(s, rn, addr);
10283 } else {
10284 tcg_temp_free_i32(addr);
10287 break;
10288 default:
10289 goto illegal_op;
10291 return 0;
10292 illegal_op:
10293 return 1;
10296 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
10298 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10299 int32_t offset;
10300 int i;
10301 TCGv_i32 tmp;
10302 TCGv_i32 tmp2;
10303 TCGv_i32 addr;
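/* Thumb predication: inside an IT block each insn is wrapped in a
 * conditional branch to condlabel that skips the generated code
 * when the block's condition fails (unless the condition is AL).
 */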
10305 if (s->condexec_mask) {
10306 cond = s->condexec_cond;
10307 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10308 s->condlabel = gen_new_label();
10309 arm_gen_test_cc(cond ^ 1, s->condlabel);
10310 s->condjmp = 1;
10314 insn = arm_lduw_code(env, s->pc, s->bswap_code);
10315 s->pc += 2;
10317 switch (insn >> 12) {
10318 case 0: case 1:
10320 rd = insn & 7;
10321 op = (insn >> 11) & 3;
10322 if (op == 3) {
10323 /* add/subtract */
10324 rn = (insn >> 3) & 7;
10325 tmp = load_reg(s, rn);
10326 if (insn & (1 << 10)) {
10327 /* immediate */
10328 tmp2 = tcg_temp_new_i32();
10329 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
10330 } else {
10331 /* reg */
10332 rm = (insn >> 6) & 7;
10333 tmp2 = load_reg(s, rm);
10335 if (insn & (1 << 9)) {
10336 if (s->condexec_mask)
10337 tcg_gen_sub_i32(tmp, tmp, tmp2);
10338 else
10339 gen_sub_CC(tmp, tmp, tmp2);
10340 } else {
10341 if (s->condexec_mask)
10342 tcg_gen_add_i32(tmp, tmp, tmp2);
10343 else
10344 gen_add_CC(tmp, tmp, tmp2);
10346 tcg_temp_free_i32(tmp2);
10347 store_reg(s, rd, tmp);
10348 } else {
10349 /* shift immediate */
10350 rm = (insn >> 3) & 7;
10351 shift = (insn >> 6) & 0x1f;
10352 tmp = load_reg(s, rm);
10353 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10354 if (!s->condexec_mask)
10355 gen_logic_CC(tmp);
10356 store_reg(s, rd, tmp);
10358 break;
10359 case 2: case 3:
10360 /* arithmetic large immediate */
10361 op = (insn >> 11) & 3;
10362 rd = (insn >> 8) & 0x7;
10363 if (op == 0) { /* mov */
10364 tmp = tcg_temp_new_i32();
10365 tcg_gen_movi_i32(tmp, insn & 0xff);
10366 if (!s->condexec_mask)
10367 gen_logic_CC(tmp);
10368 store_reg(s, rd, tmp);
10369 } else {
10370 tmp = load_reg(s, rd);
10371 tmp2 = tcg_temp_new_i32();
10372 tcg_gen_movi_i32(tmp2, insn & 0xff);
10373 switch (op) {
10374 case 1: /* cmp */
10375 gen_sub_CC(tmp, tmp, tmp2);
10376 tcg_temp_free_i32(tmp);
10377 tcg_temp_free_i32(tmp2);
10378 break;
10379 case 2: /* add */
10380 if (s->condexec_mask)
10381 tcg_gen_add_i32(tmp, tmp, tmp2);
10382 else
10383 gen_add_CC(tmp, tmp, tmp2);
10384 tcg_temp_free_i32(tmp2);
10385 store_reg(s, rd, tmp);
10386 break;
10387 case 3: /* sub */
10388 if (s->condexec_mask)
10389 tcg_gen_sub_i32(tmp, tmp, tmp2);
10390 else
10391 gen_sub_CC(tmp, tmp, tmp2);
10392 tcg_temp_free_i32(tmp2);
10393 store_reg(s, rd, tmp);
10394 break;
10397 break;
10398 case 4:
10399 if (insn & (1 << 11)) {
10400 rd = (insn >> 8) & 7;
10401 /* load pc-relative. Bit 1 of PC is ignored. */
10402 val = s->pc + 2 + ((insn & 0xff) * 4);
10403 val &= ~(uint32_t)2;
10404 addr = tcg_temp_new_i32();
10405 tcg_gen_movi_i32(addr, val);
10406 tmp = tcg_temp_new_i32();
10407 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10408 tcg_temp_free_i32(addr);
10409 store_reg(s, rd, tmp);
10410 break;
10412 if (insn & (1 << 10)) {
10413 /* data processing extended or blx */
10414 rd = (insn & 7) | ((insn >> 4) & 8);
10415 rm = (insn >> 3) & 0xf;
10416 op = (insn >> 8) & 3;
10417 switch (op) {
10418 case 0: /* add */
10419 tmp = load_reg(s, rd);
10420 tmp2 = load_reg(s, rm);
10421 tcg_gen_add_i32(tmp, tmp, tmp2);
10422 tcg_temp_free_i32(tmp2);
10423 store_reg(s, rd, tmp);
10424 break;
10425 case 1: /* cmp */
10426 tmp = load_reg(s, rd);
10427 tmp2 = load_reg(s, rm);
10428 gen_sub_CC(tmp, tmp, tmp2);
10429 tcg_temp_free_i32(tmp2);
10430 tcg_temp_free_i32(tmp);
10431 break;
10432 case 2: /* mov/cpy */
10433 tmp = load_reg(s, rm);
10434 store_reg(s, rd, tmp);
10435 break;
10436 case 3:/* branch [and link] exchange thumb register */
10437 tmp = load_reg(s, rm);
10438 if (insn & (1 << 7)) {
10439 ARCH(5);
10440 val = (uint32_t)s->pc | 1;
10441 tmp2 = tcg_temp_new_i32();
10442 tcg_gen_movi_i32(tmp2, val);
10443 store_reg(s, 14, tmp2);
10445 /* already thumb, no need to check */
10446 gen_bx(s, tmp);
10447 break;
10449 break;
10452 /* data processing register */
10453 rd = insn & 7;
10454 rm = (insn >> 3) & 7;
10455 op = (insn >> 6) & 0xf;
10456 if (op == 2 || op == 3 || op == 4 || op == 7) {
10457 /* the shift/rotate ops want the operands backwards */
10458 val = rm;
10459 rm = rd;
10460 rd = val;
10461 val = 1;
10462 } else {
10463 val = 0;
10466 if (op == 9) { /* neg */
10467 tmp = tcg_temp_new_i32();
10468 tcg_gen_movi_i32(tmp, 0);
10469 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10470 tmp = load_reg(s, rd);
10471 } else {
10472 TCGV_UNUSED_I32(tmp);
10475 tmp2 = load_reg(s, rm);
10476 switch (op) {
10477 case 0x0: /* and */
10478 tcg_gen_and_i32(tmp, tmp, tmp2);
10479 if (!s->condexec_mask)
10480 gen_logic_CC(tmp);
10481 break;
10482 case 0x1: /* eor */
10483 tcg_gen_xor_i32(tmp, tmp, tmp2);
10484 if (!s->condexec_mask)
10485 gen_logic_CC(tmp);
10486 break;
10487 case 0x2: /* lsl */
10488 if (s->condexec_mask) {
10489 gen_shl(tmp2, tmp2, tmp);
10490 } else {
10491 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
10492 gen_logic_CC(tmp2);
10494 break;
10495 case 0x3: /* lsr */
10496 if (s->condexec_mask) {
10497 gen_shr(tmp2, tmp2, tmp);
10498 } else {
10499 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
10500 gen_logic_CC(tmp2);
10502 break;
10503 case 0x4: /* asr */
10504 if (s->condexec_mask) {
10505 gen_sar(tmp2, tmp2, tmp);
10506 } else {
10507 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
10508 gen_logic_CC(tmp2);
10510 break;
10511 case 0x5: /* adc */
10512 if (s->condexec_mask) {
10513 gen_adc(tmp, tmp2);
10514 } else {
10515 gen_adc_CC(tmp, tmp, tmp2);
10517 break;
10518 case 0x6: /* sbc */
10519 if (s->condexec_mask) {
10520 gen_sub_carry(tmp, tmp, tmp2);
10521 } else {
10522 gen_sbc_CC(tmp, tmp, tmp2);
10524 break;
10525 case 0x7: /* ror */
10526 if (s->condexec_mask) {
10527 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10528 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
10529 } else {
10530 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
10531 gen_logic_CC(tmp2);
10533 break;
10534 case 0x8: /* tst */
10535 tcg_gen_and_i32(tmp, tmp, tmp2);
10536 gen_logic_CC(tmp);
10537 rd = 16;
10538 break;
10539 case 0x9: /* neg */
10540 if (s->condexec_mask)
10541 tcg_gen_neg_i32(tmp, tmp2);
10542 else
10543 gen_sub_CC(tmp, tmp, tmp2);
10544 break;
10545 case 0xa: /* cmp */
10546 gen_sub_CC(tmp, tmp, tmp2);
10547 rd = 16;
10548 break;
10549 case 0xb: /* cmn */
10550 gen_add_CC(tmp, tmp, tmp2);
10551 rd = 16;
10552 break;
10553 case 0xc: /* orr */
10554 tcg_gen_or_i32(tmp, tmp, tmp2);
10555 if (!s->condexec_mask)
10556 gen_logic_CC(tmp);
10557 break;
10558 case 0xd: /* mul */
10559 tcg_gen_mul_i32(tmp, tmp, tmp2);
10560 if (!s->condexec_mask)
10561 gen_logic_CC(tmp);
10562 break;
10563 case 0xe: /* bic */
10564 tcg_gen_andc_i32(tmp, tmp, tmp2);
10565 if (!s->condexec_mask)
10566 gen_logic_CC(tmp);
10567 break;
10568 case 0xf: /* mvn */
10569 tcg_gen_not_i32(tmp2, tmp2);
10570 if (!s->condexec_mask)
10571 gen_logic_CC(tmp2);
10572 val = 1;
10573 rm = rd;
10574 break;
10576 if (rd != 16) {
10577 if (val) {
10578 store_reg(s, rm, tmp2);
10579 if (op != 0xf)
10580 tcg_temp_free_i32(tmp);
10581 } else {
10582 store_reg(s, rd, tmp);
10583 tcg_temp_free_i32(tmp2);
10585 } else {
10586 tcg_temp_free_i32(tmp);
10587 tcg_temp_free_i32(tmp2);
10589 break;
10591 case 5:
10592 /* load/store register offset. */
10593 rd = insn & 7;
10594 rn = (insn >> 3) & 7;
10595 rm = (insn >> 6) & 7;
10596 op = (insn >> 9) & 7;
10597 addr = load_reg(s, rn);
10598 tmp = load_reg(s, rm);
10599 tcg_gen_add_i32(addr, addr, tmp);
10600 tcg_temp_free_i32(tmp);
10602 if (op < 3) { /* store */
10603 tmp = load_reg(s, rd);
10604 } else {
10605 tmp = tcg_temp_new_i32();
10608 switch (op) {
10609 case 0: /* str */
10610 gen_aa32_st32(tmp, addr, get_mem_index(s));
10611 break;
10612 case 1: /* strh */
10613 gen_aa32_st16(tmp, addr, get_mem_index(s));
10614 break;
10615 case 2: /* strb */
10616 gen_aa32_st8(tmp, addr, get_mem_index(s));
10617 break;
10618 case 3: /* ldrsb */
10619 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
10620 break;
10621 case 4: /* ldr */
10622 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10623 break;
10624 case 5: /* ldrh */
10625 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10626 break;
10627 case 6: /* ldrb */
10628 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10629 break;
10630 case 7: /* ldrsh */
10631 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
10632 break;
10634 if (op >= 3) { /* load */
10635 store_reg(s, rd, tmp);
10636 } else {
10637 tcg_temp_free_i32(tmp);
10639 tcg_temp_free_i32(addr);
10640 break;
10642 case 6:
10643 /* load/store word immediate offset */
10644 rd = insn & 7;
10645 rn = (insn >> 3) & 7;
10646 addr = load_reg(s, rn);
10647 val = (insn >> 4) & 0x7c;
10648 tcg_gen_addi_i32(addr, addr, val);
10650 if (insn & (1 << 11)) {
10651 /* load */
10652 tmp = tcg_temp_new_i32();
10653 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10654 store_reg(s, rd, tmp);
10655 } else {
10656 /* store */
10657 tmp = load_reg(s, rd);
10658 gen_aa32_st32(tmp, addr, get_mem_index(s));
10659 tcg_temp_free_i32(tmp);
10661 tcg_temp_free_i32(addr);
10662 break;
10664 case 7:
10665 /* load/store byte immediate offset */
10666 rd = insn & 7;
10667 rn = (insn >> 3) & 7;
10668 addr = load_reg(s, rn);
10669 val = (insn >> 6) & 0x1f;
10670 tcg_gen_addi_i32(addr, addr, val);
10672 if (insn & (1 << 11)) {
10673 /* load */
10674 tmp = tcg_temp_new_i32();
10675 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
10676 store_reg(s, rd, tmp);
10677 } else {
10678 /* store */
10679 tmp = load_reg(s, rd);
10680 gen_aa32_st8(tmp, addr, get_mem_index(s));
10681 tcg_temp_free_i32(tmp);
10683 tcg_temp_free_i32(addr);
10684 break;
10686 case 8:
10687 /* load/store halfword immediate offset */
10688 rd = insn & 7;
10689 rn = (insn >> 3) & 7;
10690 addr = load_reg(s, rn);
10691 val = (insn >> 5) & 0x3e;
10692 tcg_gen_addi_i32(addr, addr, val);
10694 if (insn & (1 << 11)) {
10695 /* load */
10696 tmp = tcg_temp_new_i32();
10697 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
10698 store_reg(s, rd, tmp);
10699 } else {
10700 /* store */
10701 tmp = load_reg(s, rd);
10702 gen_aa32_st16(tmp, addr, get_mem_index(s));
10703 tcg_temp_free_i32(tmp);
10705 tcg_temp_free_i32(addr);
10706 break;
10708 case 9:
10709 /* load/store from stack */
10710 rd = (insn >> 8) & 7;
10711 addr = load_reg(s, 13);
10712 val = (insn & 0xff) * 4;
10713 tcg_gen_addi_i32(addr, addr, val);
10715 if (insn & (1 << 11)) {
10716 /* load */
10717 tmp = tcg_temp_new_i32();
10718 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10719 store_reg(s, rd, tmp);
10720 } else {
10721 /* store */
10722 tmp = load_reg(s, rd);
10723 gen_aa32_st32(tmp, addr, get_mem_index(s));
10724 tcg_temp_free_i32(tmp);
10726 tcg_temp_free_i32(addr);
10727 break;
10729 case 10:
10730 /* add to high reg */
10731 rd = (insn >> 8) & 7;
10732 if (insn & (1 << 11)) {
10733 /* SP */
10734 tmp = load_reg(s, 13);
10735 } else {
10736 /* PC. bit 1 is ignored. */
10737 tmp = tcg_temp_new_i32();
10738 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
10740 val = (insn & 0xff) * 4;
10741 tcg_gen_addi_i32(tmp, tmp, val);
10742 store_reg(s, rd, tmp);
10743 break;
10745 case 11:
10746 /* misc */
10747 op = (insn >> 8) & 0xf;
10748 switch (op) {
10749 case 0:
10750 /* adjust stack pointer */
10751 tmp = load_reg(s, 13);
10752 val = (insn & 0x7f) * 4;
10753 if (insn & (1 << 7))
10754 val = -(int32_t)val;
10755 tcg_gen_addi_i32(tmp, tmp, val);
10756 store_reg(s, 13, tmp);
10757 break;
10759 case 2: /* sign/zero extend. */
10760 ARCH(6);
10761 rd = insn & 7;
10762 rm = (insn >> 3) & 7;
10763 tmp = load_reg(s, rm);
10764 switch ((insn >> 6) & 3) {
10765 case 0: gen_sxth(tmp); break;
10766 case 1: gen_sxtb(tmp); break;
10767 case 2: gen_uxth(tmp); break;
10768 case 3: gen_uxtb(tmp); break;
10770 store_reg(s, rd, tmp);
10771 break;
10772 case 4: case 5: case 0xc: case 0xd:
10773 /* push/pop */
10774 addr = load_reg(s, 13);
10775 if (insn & (1 << 8))
10776 offset = 4;
10777 else
10778 offset = 0;
10779 for (i = 0; i < 8; i++) {
10780 if (insn & (1 << i))
10781 offset += 4;
10783 if ((insn & (1 << 11)) == 0) {
10784 tcg_gen_addi_i32(addr, addr, -offset);
10786 for (i = 0; i < 8; i++) {
10787 if (insn & (1 << i)) {
10788 if (insn & (1 << 11)) {
10789 /* pop */
10790 tmp = tcg_temp_new_i32();
10791 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10792 store_reg(s, i, tmp);
10793 } else {
10794 /* push */
10795 tmp = load_reg(s, i);
10796 gen_aa32_st32(tmp, addr, get_mem_index(s));
10797 tcg_temp_free_i32(tmp);
10799 /* advance to the next address. */
10800 tcg_gen_addi_i32(addr, addr, 4);
10803 TCGV_UNUSED_I32(tmp);
10804 if (insn & (1 << 8)) {
10805 if (insn & (1 << 11)) {
10806 /* pop pc */
10807 tmp = tcg_temp_new_i32();
10808 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
10809 /* don't set the pc until the rest of the instruction
10810 has completed */
10811 } else {
10812 /* push lr */
10813 tmp = load_reg(s, 14);
10814 gen_aa32_st32(tmp, addr, get_mem_index(s));
10815 tcg_temp_free_i32(tmp);
10817 tcg_gen_addi_i32(addr, addr, 4);
10819 if ((insn & (1 << 11)) == 0) {
10820 tcg_gen_addi_i32(addr, addr, -offset);
10822 /* write back the new stack pointer */
10823 store_reg(s, 13, addr);
10824 /* set the new PC value */
10825 if ((insn & 0x0900) == 0x0900) {
10826 store_reg_from_load(s, 15, tmp);
10828 break;
10830 case 1: case 3: case 9: case 11: /* cbz/cbnz */
10831 rm = insn & 7;
10832 tmp = load_reg(s, rm);
10833 s->condlabel = gen_new_label();
10834 s->condjmp = 1;
10835 if (insn & (1 << 11))
10836 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
10837 else
10838 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
10839 tcg_temp_free_i32(tmp);
10840 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10841 val = (uint32_t)s->pc + 2;
10842 val += offset;
10843 gen_jmp(s, val);
10844 break;
10846 case 15: /* IT, nop-hint. */
10847 if ((insn & 0xf) == 0) {
10848 gen_nop_hint(s, (insn >> 4) & 0xf);
10849 break;
10851 /* If Then. */
10852 s->condexec_cond = (insn >> 4) & 0xe;
10853 s->condexec_mask = insn & 0x1f;
10854 /* No actual code generated for this insn, just set up state. */
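/* E.g. IT EQ encodes as 0xbf08: firstcond = 0 (EQ), mask = 0x8.
 * Note the low bit of firstcond is folded into condexec_mask here
 * (insn & 0x1f); condexec_cond keeps only its upper bits.
 */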
10855 break;
10857 case 0xe: /* bkpt */
10859 int imm8 = extract32(insn, 0, 8);
10860 ARCH(5);
10861 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
10862 break;
10865 case 0xa: /* rev */
10866 ARCH(6);
10867 rn = (insn >> 3) & 0x7;
10868 rd = insn & 0x7;
10869 tmp = load_reg(s, rn);
10870 switch ((insn >> 6) & 3) {
10871 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
10872 case 1: gen_rev16(tmp); break;
10873 case 3: gen_revsh(tmp); break;
10874 default: goto illegal_op;
10876 store_reg(s, rd, tmp);
10877 break;
10879 case 6:
10880 switch ((insn >> 5) & 7) {
10881 case 2:
10882 /* setend */
10883 ARCH(6);
10884 if (((insn >> 3) & 1) != s->bswap_code) {
10885 /* Dynamic endianness switching not implemented. */
10886 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
10887 goto illegal_op;
10889 break;
10890 case 3:
10891 /* cps */
10892 ARCH(6);
10893 if (IS_USER(s)) {
10894 break;
10896 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10897 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
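                    /* tmp is 1 for CPSID (disable) and 0 for CPSIE;
                     * 19 and 16 below are the v7M system register
                     * numbers for FAULTMASK and PRIMASK.
                     */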
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
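        /* If the base register is also in a load list, the loaded value
         * must not become visible until the whole insn has completed,
         * so it is parked in loaded_var and written back after the loop.
         */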
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
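        /* insn[7:0] is a signed halfword offset: the shift pair below
         * sign-extends it from 8 bits, and the target is relative to
         * this insn's address + 4.
         */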
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
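        /* Unconditional B: insn[10:0] is a signed halfword offset; the
         * +2 below makes the target relative to this insn's address + 4.
         */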
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(ARMCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_internal_a64(cpu, tb, search_pc);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
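            /* Pack the IT state in the same layout the TB flags use, so
             * that restore_state_to_opc() can copy it straight back
             * into env->condexec_bits.
             */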
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) |
                                        (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 0);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
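            /* Advance the IT state machine: the top bit of the mask
             * supplies the next then/else sense, the mask shifts up,
             * and a mask of zero means we have left the IT block.
             */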
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns++;
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            } else if (dc->is_jmp == DISAS_HVC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
            } else if (dc->is_jmp == DISAS_SMC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SMC, syn_aa32_smc());
            } else if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc, dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
        } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
        } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc());
        } else if (dc->ss_active) {
            gen_step_complete_exception(dc);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception_internal(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc());
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
}

static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"
};

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;
    uint32_t psr;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
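    /* The trailing %s%d shows the mode name and whether this is a
     * 32-bit mode (M[4] set) or a legacy 26-bit one.
     */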
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    if (is_a64(env)) {
        env->pc = tcg_ctx.gen_opc_pc[pc_pos];
        env->condexec_bits = 0;
    } else {
        env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
        env->condexec_bits = gen_opc_condexec_bits[pc_pos];
    }
}